Camera: Apply physical rotation for depth/conf. maps
Depth and confidence maps must be physically rotated whenever the
source color image itself is physically rotated. When such a rotation
is applied, the EXIF orientation value is kept consistent by setting
it to 0.
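As a minimal illustration of the new rotation path (an illustrative
standalone sketch, not the service code; the helper names rangeMeters,
confidence and rotate90 are hypothetical), each DEPTH16 sample carries
the range in millimeters in its low 13 bits and a 3-bit confidence in
the top bits, and a 90 degree CW rotation is obtained by reading the
source from the bottom-left corner with rows and columns transposed,
which also switches the encoded map dimensions:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // DEPTH16: low 13 bits = range in millimeters, top 3 bits = confidence
    // (0 encodes maximum confidence).
    static float rangeMeters(uint16_t v) { return static_cast<float>(v & 0x1FFF) / 1000.f; }
    static float confidence(uint16_t v) {
        uint16_t c = (v >> 13) & 0x7;
        return (c == 0) ? 1.f : (static_cast<float>(c) - 1) / 7.f;
    }

    // Apply a 90 degree CW rotation by reading from the bottom-left corner and
    // transposing rows and columns; the output has switched dimensions.
    static void rotate90(const std::vector<uint16_t>& src, size_t width, size_t height,
                         size_t stride, std::vector<float>* outMeters) {
        outMeters->clear();
        outMeters->reserve(width * height);
        for (size_t i = 0; i < width; i++) {
            for (size_t j = height; j-- > 0;) {
                outMeters->push_back(rangeMeters(src[j * stride + i]));
            }
        }
    }

    int main() {
        // 3x2 sample map, stride == width, ranges 1..6 mm, confidence bits zero.
        std::vector<uint16_t> depth = {1, 2, 3, 4, 5, 6};
        std::vector<float> rotated;
        rotate90(depth, /*width*/ 3, /*height*/ 2, /*stride*/ 3, &rotated);
        // Row-major 2x3 result: 0.004 0.001 | 0.005 0.002 | 0.006 0.003
        for (float m : rotated) printf("%.3f ", m);
        printf("(confidence of first sample: %.1f)\n", confidence(depth[0]));
        return 0;
    }

The 180 and 270 degree cases follow the same pattern with different
start corners and read directions, which is why only the 90 and 270
degree rotations report switched depth map dimensions.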
Bug: 123699590
Test: Manual using application,
adb shell /data/nativetest64/cameraservice_test/cameraservice_test
--gtest_filter=DepthProcessorTest.*
Change-Id: I5cdd41c89368a1841d53f2195790aa1b55258495
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
index 2eec0f7..9525ad2 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -339,6 +339,21 @@
} else {
depthPhoto.mIsLensDistortionValid = 0;
}
+ entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
+ if (entry.count > 0) {
+ // The camera jpeg orientation value must be one of: 0, 90, 180, 270.
+ switch (entry.data.i32[0]) {
+ case 0:
+ case 90:
+ case 180:
+ case 270:
+ depthPhoto.mOrientation = static_cast<DepthPhotoOrientation> (entry.data.i32[0]);
+ break;
+ default:
+ ALOGE("%s: Unexpected jpeg orientation value: %d, default to 0 degrees",
+ __FUNCTION__, entry.data.i32[0]);
+ }
+ }
size_t actualJpegSize = 0;
res = mDepthPhotoProcess(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
index 3af4220..6d96163 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -224,8 +224,106 @@
return ret;
}
+inline void unpackDepth16(uint16_t value, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ // Android densely packed depth map. The units for the range are in
+ // millimeters and need to be scaled to meters.
+ // The confidence value is encoded in the 3 most significant bits.
+ // The confidence data needs to be additionally normalized with
+ // values 1.0f, 0.0f representing maximum and minimum confidence
+ // respectively.
+ auto point = static_cast<float>(value & 0x1FFF) / 1000.f;
+ points->push_back(point);
+
+ auto conf = (value >> 13) & 0x7;
+ float normConfidence = (conf == 0) ? 1.f : (static_cast<float>(conf) - 1) / 7.f;
+ confidence->push_back(normConfidence);
+
+ if (*near > point) {
+ *near = point;
+ }
+ if (*far < point) {
+ *far = point;
+ }
+}
+
+// Trivial case, read forward from the top-left corner.
+void rotate0AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (size_t i = 0; i < inputFrame.mDepthMapHeight; i++) {
+ for (size_t j = 0; j < inputFrame.mDepthMapWidth; j++) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j], points,
+ confidence, near, far);
+ }
+ }
+}
+
+// A 90 degree CW rotation can be applied by reading from the bottom-left corner,
+// transposing rows and columns.
+void rotate90AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (size_t i = 0; i < inputFrame.mDepthMapWidth; i++) {
+ for (ssize_t j = inputFrame.mDepthMapHeight-1; j >= 0; j--) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[j*inputFrame.mDepthMapStride + i], points,
+ confidence, near, far);
+ }
+ }
+}
+
+// A 180 degree CW rotation can be applied by reading backwards from the bottom-right corner.
+void rotate180AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (ssize_t i = inputFrame.mDepthMapHeight-1; i >= 0; i--) {
+ for (ssize_t j = inputFrame.mDepthMapWidth-1; j >= 0; j--) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j], points,
+ confidence, near, far);
+ }
+ }
+}
+
+// A 270 degree CW rotation can be applied by reading from the top-right corner,
+// transposing rows and columns.
+void rotate270AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (ssize_t i = inputFrame.mDepthMapWidth-1; i >= 0; i--) {
+ for (size_t j = 0; j < inputFrame.mDepthMapHeight; j++) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[j*inputFrame.mDepthMapStride + i], points,
+ confidence, near, far);
+ }
+ }
+}
+
+bool rotateAndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ switch (inputFrame.mOrientation) {
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES:
+ rotate0AndUnpack(inputFrame, points, confidence, near, far);
+ return false;
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES:
+ rotate90AndUnpack(inputFrame, points, confidence, near, far);
+ return true;
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_180_DEGREES:
+ rotate180AndUnpack(inputFrame, points, confidence, near, far);
+ return false;
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES:
+ rotate270AndUnpack(inputFrame, points, confidence, near, far);
+ return true;
+ default:
+ ALOGE("%s: Unsupported depth photo rotation: %d, default to 0", __FUNCTION__,
+ inputFrame.mOrientation);
+ rotate0AndUnpack(inputFrame, points, confidence, near, far);
+ }
+
+ return false;
+}
+
std::unique_ptr<dynamic_depth::DepthMap> processDepthMapFrame(DepthPhotoInputFrame inputFrame,
- ExifOrientation exifOrientation, std::vector<std::unique_ptr<Item>> *items /*out*/) {
+ ExifOrientation exifOrientation, std::vector<std::unique_ptr<Item>> *items /*out*/,
+ bool *switchDimensions /*out*/) {
+ if ((items == nullptr) || (switchDimensions == nullptr)) {
+ return nullptr;
+ }
+
std::vector<float> points, confidence;
size_t pointCount = inputFrame.mDepthMapWidth * inputFrame.mDepthMapHeight;
@@ -233,29 +331,21 @@
confidence.reserve(pointCount);
float near = UINT16_MAX;
float far = .0f;
- for (size_t i = 0; i < inputFrame.mDepthMapHeight; i++) {
- for (size_t j = 0; j < inputFrame.mDepthMapWidth; j++) {
- // Android densely packed depth map. The units for the range are in
- // millimeters and need to be scaled to meters.
- // The confidence value is encoded in the 3 most significant bits.
- // The confidence data needs to be additionally normalized with
- // values 1.0f, 0.0f representing maximum and minimum confidence
- // respectively.
- auto value = inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j];
- auto point = static_cast<float>(value & 0x1FFF) / 1000.f;
- points.push_back(point);
+ *switchDimensions = false;
+ // Physical rotation of the depth and confidence maps may be needed when
+ // the EXIF orientation is set to 0 degrees while the depth photo orientation
+ // (taken from the source color image) has a different value.
+ if (exifOrientation == ExifOrientation::ORIENTATION_0_DEGREES) {
+ *switchDimensions = rotateAndUnpack(inputFrame, &points, &confidence, &near, &far);
+ } else {
+ rotate0AndUnpack(inputFrame, &points, &confidence, &near, &far);
+ }
- auto conf = (value >> 13) & 0x7;
- float normConfidence = (conf == 0) ? 1.f : (static_cast<float>(conf) - 1) / 7.f;
- confidence.push_back(normConfidence);
-
- if (near > point) {
- near = point;
- }
- if (far < point) {
- far = point;
- }
- }
+ size_t width = inputFrame.mDepthMapWidth;
+ size_t height = inputFrame.mDepthMapHeight;
+ if (*switchDimensions) {
+ width = inputFrame.mDepthMapHeight;
+ height = inputFrame.mDepthMapWidth;
}
if (near == far) {
@@ -281,8 +371,8 @@
depthParams.depth_image_data.resize(inputFrame.mMaxJpegSize);
depthParams.confidence_data.resize(inputFrame.mMaxJpegSize);
size_t actualJpegSize;
- auto ret = encodeGrayscaleJpeg(inputFrame.mDepthMapWidth, inputFrame.mDepthMapHeight,
- pointsQuantized.data(), depthParams.depth_image_data.data(), inputFrame.mMaxJpegSize,
+ auto ret = encodeGrayscaleJpeg(width, height, pointsQuantized.data(),
+ depthParams.depth_image_data.data(), inputFrame.mMaxJpegSize,
inputFrame.mJpegQuality, exifOrientation, actualJpegSize);
if (ret != NO_ERROR) {
ALOGE("%s: Depth map compression failed!", __FUNCTION__);
@@ -290,8 +380,8 @@
}
depthParams.depth_image_data.resize(actualJpegSize);
- ret = encodeGrayscaleJpeg(inputFrame.mDepthMapWidth, inputFrame.mDepthMapHeight,
- confidenceQuantized.data(), depthParams.confidence_data.data(), inputFrame.mMaxJpegSize,
+ ret = encodeGrayscaleJpeg(width, height, confidenceQuantized.data(),
+ depthParams.confidence_data.data(), inputFrame.mMaxJpegSize,
inputFrame.mJpegQuality, exifOrientation, actualJpegSize);
if (ret != NO_ERROR) {
ALOGE("%s: Confidence map compression failed!", __FUNCTION__);
@@ -321,7 +411,9 @@
ExifOrientation exifOrientation = getExifOrientation(
reinterpret_cast<const unsigned char*> (inputFrame.mMainJpegBuffer),
inputFrame.mMainJpegSize);
- cameraParams->depth_map = processDepthMapFrame(inputFrame, exifOrientation, &items);
+ bool switchDimensions;
+ cameraParams->depth_map = processDepthMapFrame(inputFrame, exifOrientation, &items,
+ &switchDimensions);
if (cameraParams->depth_map == nullptr) {
ALOGE("%s: Depth map processing failed!", __FUNCTION__);
return BAD_VALUE;
@@ -333,7 +425,13 @@
// [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
const dynamic_depth::Point<double> focalLength(inputFrame.mInstrinsicCalibration[0],
inputFrame.mInstrinsicCalibration[1]);
- const Dimension imageSize(inputFrame.mMainJpegWidth, inputFrame.mMainJpegHeight);
+ size_t width = inputFrame.mMainJpegWidth;
+ size_t height = inputFrame.mMainJpegHeight;
+ if (switchDimensions) {
+ width = inputFrame.mMainJpegHeight;
+ height = inputFrame.mMainJpegWidth;
+ }
+ const Dimension imageSize(width, height);
ImagingModelParams imagingParams(focalLength, imageSize);
imagingParams.principal_point.x = inputFrame.mInstrinsicCalibration[2];
imagingParams.principal_point.y = inputFrame.mInstrinsicCalibration[3];
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.h b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
index 19889a1..6a2fbff 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.h
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
@@ -23,19 +23,27 @@
namespace android {
namespace camera3 {
+enum DepthPhotoOrientation {
+ DEPTH_ORIENTATION_0_DEGREES = 0,
+ DEPTH_ORIENTATION_90_DEGREES = 90,
+ DEPTH_ORIENTATION_180_DEGREES = 180,
+ DEPTH_ORIENTATION_270_DEGREES = 270,
+};
+
struct DepthPhotoInputFrame {
- const char* mMainJpegBuffer;
- size_t mMainJpegSize;
- size_t mMainJpegWidth, mMainJpegHeight;
- uint16_t* mDepthMapBuffer;
- size_t mDepthMapWidth, mDepthMapHeight, mDepthMapStride;
- size_t mMaxJpegSize;
- uint8_t mJpegQuality;
- uint8_t mIsLogical;
- float mInstrinsicCalibration[5];
- uint8_t mIsInstrinsicCalibrationValid;
- float mLensDistortion[5];
- uint8_t mIsLensDistortionValid;
+ const char* mMainJpegBuffer;
+ size_t mMainJpegSize;
+ size_t mMainJpegWidth, mMainJpegHeight;
+ uint16_t* mDepthMapBuffer;
+ size_t mDepthMapWidth, mDepthMapHeight, mDepthMapStride;
+ size_t mMaxJpegSize;
+ uint8_t mJpegQuality;
+ uint8_t mIsLogical;
+ float mInstrinsicCalibration[5];
+ uint8_t mIsInstrinsicCalibrationValid;
+ float mLensDistortion[5];
+ uint8_t mIsLensDistortionValid;
+ DepthPhotoOrientation mOrientation;
DepthPhotoInputFrame() :
mMainJpegBuffer(nullptr),
@@ -52,7 +60,8 @@
mInstrinsicCalibration{0.f},
mIsInstrinsicCalibrationValid(0),
mLensDistortion{0.f},
- mIsLensDistortionValid(0) {}
+ mIsLensDistortionValid(0),
+ mOrientation(DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES) {}
};
static const char *kDepthPhotoLibrary = "libdepthphoto.so";
diff --git a/services/camera/libcameraservice/tests/DepthProcessorTest.cpp b/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
index 9898122..2162514 100644
--- a/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
+++ b/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
@@ -50,7 +50,7 @@
}
void generateColorJpegBuffer(int jpegQuality, ExifOrientation orientationValue, bool includeExif,
- std::vector<uint8_t> *colorJpegBuffer /*out*/) {
+ bool switchDimensions, std::vector<uint8_t> *colorJpegBuffer /*out*/) {
ASSERT_NE(colorJpegBuffer, nullptr);
std::array<uint8_t, kTestBufferNV12Size> colorSourceBuffer;
@@ -59,15 +59,23 @@
for (size_t i = 0; i < colorSourceBuffer.size(); i++) {
colorSourceBuffer[i] = uniDist(gen);
}
+
+ size_t width = kTestBufferWidth;
+ size_t height = kTestBufferHeight;
+ if (switchDimensions) {
+ width = kTestBufferHeight;
+ height = kTestBufferWidth;
+ }
+
NV12Compressor jpegCompressor;
if (includeExif) {
ASSERT_TRUE(jpegCompressor.compressWithExifOrientation(
- reinterpret_cast<const unsigned char*> (colorSourceBuffer.data()),
- kTestBufferWidth, kTestBufferHeight, jpegQuality, orientationValue));
+ reinterpret_cast<const unsigned char*> (colorSourceBuffer.data()), width, height,
+ jpegQuality, orientationValue));
} else {
ASSERT_TRUE(jpegCompressor.compress(
- reinterpret_cast<const unsigned char*> (colorSourceBuffer.data()),
- kTestBufferWidth, kTestBufferHeight, jpegQuality));
+ reinterpret_cast<const unsigned char*> (colorSourceBuffer.data()), width, height,
+ jpegQuality));
}
*colorJpegBuffer = std::move(jpegCompressor.getCompressedData());
@@ -109,7 +117,7 @@
std::vector<uint8_t> colorJpegBuffer;
generateColorJpegBuffer(jpegQuality, ExifOrientation::ORIENTATION_UNDEFINED,
- /*includeExif*/ false, &colorJpegBuffer);
+ /*includeExif*/ false, /*switchDimensions*/ false, &colorJpegBuffer);
std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
generateDepth16Buffer(&depth16Buffer);
@@ -153,7 +161,7 @@
std::vector<uint8_t> colorJpegBuffer;
generateColorJpegBuffer(jpegQuality, ExifOrientation::ORIENTATION_UNDEFINED,
- /*includeExif*/ false, &colorJpegBuffer);
+ /*includeExif*/ false, /*switchDimensions*/ false, &colorJpegBuffer);
std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
generateDepth16Buffer(&depth16Buffer);
@@ -209,12 +217,11 @@
for (auto exifOrientation : exifOrientations) {
std::vector<uint8_t> colorJpegBuffer;
generateColorJpegBuffer(jpegQuality, exifOrientation, /*includeExif*/ true,
- &colorJpegBuffer);
+ /*switchDimensions*/ false, &colorJpegBuffer);
if (exifOrientation != ExifOrientation::ORIENTATION_UNDEFINED) {
auto jpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
- ASSERT_EQ(NV12Compressor::getExifOrientation(
- reinterpret_cast<const unsigned char*> (colorJpegBuffer.data()),
- colorJpegBuffer.size(), &jpegExifOrientation), OK);
+ ASSERT_EQ(NV12Compressor::getExifOrientation(colorJpegBuffer.data(),
+ colorJpegBuffer.size(), &jpegExifOrientation), OK);
ASSERT_EQ(exifOrientation, jpegExifOrientation);
}
@@ -252,8 +259,7 @@
//Depth and confidence images must have the same EXIF orientation as the source
auto depthJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
- ASSERT_EQ(NV12Compressor::getExifOrientation(
- reinterpret_cast<const unsigned char*> (depthPhotoBuffer.data() + mainJpegSize),
+ ASSERT_EQ(NV12Compressor::getExifOrientation(depthPhotoBuffer.data() + mainJpegSize,
depthMapSize, &depthJpegExifOrientation), OK);
if (exifOrientation == ORIENTATION_UNDEFINED) {
// In case of undefined or missing EXIF orientation, always expect 0 degrees in the
@@ -265,8 +271,8 @@
auto confidenceJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
ASSERT_EQ(NV12Compressor::getExifOrientation(
- reinterpret_cast<const unsigned char*> (depthPhotoBuffer.data() + mainJpegSize +
- depthMapSize), confidenceMapSize, &confidenceJpegExifOrientation), OK);
+ depthPhotoBuffer.data() + mainJpegSize + depthMapSize,
+ confidenceMapSize, &confidenceJpegExifOrientation), OK);
if (exifOrientation == ORIENTATION_UNDEFINED) {
// In case of undefined or missing EXIF orientation, always expect 0 degrees in the
// confidence map.
@@ -278,3 +284,99 @@
dlclose(libHandle);
}
+
+TEST(DepthProcessorTest, TestDepthPhotoPhysicalRotation) {
+ void *libHandle;
+ int jpegQuality = 95;
+
+ process_depth_photo_frame processFunc;
+ linkToDepthPhotoLibrary(&libHandle, &processFunc);
+ if (libHandle == nullptr) {
+ // Depth library not present, nothing more to test.
+ return;
+ }
+
+ // In case of physical rotation, the EXIF orientation must always be 0.
+ auto exifOrientation = ExifOrientation::ORIENTATION_0_DEGREES;
+ DepthPhotoOrientation depthOrientations[] = {
+ DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_180_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES };
+ for (auto depthOrientation : depthOrientations) {
+ std::vector<uint8_t> colorJpegBuffer;
+ bool switchDimensions = false;
+ size_t expectedWidth = kTestBufferWidth;
+ size_t expectedHeight = kTestBufferHeight;
+ if ((depthOrientation == DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES) ||
+ (depthOrientation == DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES)) {
+ switchDimensions = true;
+ expectedWidth = kTestBufferHeight;
+ expectedHeight = kTestBufferWidth;
+ }
+ generateColorJpegBuffer(jpegQuality, exifOrientation, /*includeExif*/ true,
+ switchDimensions, &colorJpegBuffer);
+ auto jpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(colorJpegBuffer.data(), colorJpegBuffer.size(),
+ &jpegExifOrientation), OK);
+ ASSERT_EQ(exifOrientation, jpegExifOrientation);
+
+ std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+ generateDepth16Buffer(&depth16Buffer);
+
+ DepthPhotoInputFrame inputFrame;
+ inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+ inputFrame.mMainJpegSize = colorJpegBuffer.size();
+ // Worst case both depth and confidence maps have the same size as the main color image.
+ inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+ inputFrame.mMainJpegWidth = kTestBufferWidth;
+ inputFrame.mMainJpegHeight = kTestBufferHeight;
+ inputFrame.mJpegQuality = jpegQuality;
+ inputFrame.mDepthMapBuffer = depth16Buffer.data();
+ inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+ inputFrame.mDepthMapHeight = kTestBufferHeight;
+ inputFrame.mOrientation = depthOrientation;
+
+ std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+ size_t actualDepthPhotoSize = 0;
+ ASSERT_EQ(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
+ ASSERT_TRUE((actualDepthPhotoSize > 0) &&
+ (depthPhotoBuffer.size() >= actualDepthPhotoSize));
+
+ size_t mainJpegSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data(), actualDepthPhotoSize,
+ &mainJpegSize), OK);
+ ASSERT_TRUE((mainJpegSize > 0) && (mainJpegSize < actualDepthPhotoSize));
+ size_t depthMapSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data() + mainJpegSize,
+ actualDepthPhotoSize - mainJpegSize, &depthMapSize), OK);
+ ASSERT_TRUE((depthMapSize > 0) && (depthMapSize < (actualDepthPhotoSize - mainJpegSize)));
+ size_t confidenceMapSize = actualDepthPhotoSize - (mainJpegSize + depthMapSize);
+
+ // Depth and confidence images must have the same EXIF orientation as the source.
+ auto depthJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(depthPhotoBuffer.data() + mainJpegSize,
+ depthMapSize, &depthJpegExifOrientation), OK);
+ ASSERT_EQ(depthJpegExifOrientation, exifOrientation);
+ size_t depthMapWidth, depthMapHeight;
+ ASSERT_EQ(NV12Compressor::getJpegImageDimensions(depthPhotoBuffer.data() + mainJpegSize,
+ depthMapSize, &depthMapWidth, &depthMapHeight), OK);
+ ASSERT_EQ(depthMapWidth, expectedWidth);
+ ASSERT_EQ(depthMapHeight, expectedHeight);
+
+ auto confidenceJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(
+ depthPhotoBuffer.data() + mainJpegSize + depthMapSize, confidenceMapSize,
+ &confidenceJpegExifOrientation), OK);
+ ASSERT_EQ(confidenceJpegExifOrientation, exifOrientation);
+ size_t confidenceMapWidth, confidenceMapHeight;
+ ASSERT_EQ(NV12Compressor::getJpegImageDimensions(
+ depthPhotoBuffer.data() + mainJpegSize + depthMapSize, confidenceMapSize,
+ &confidenceMapWidth, &confidenceMapHeight), OK);
+ ASSERT_EQ(confidenceMapWidth, expectedWidth);
+ ASSERT_EQ(confidenceMapHeight, expectedHeight);
+ }
+
+ dlclose(libHandle);
+}
diff --git a/services/camera/libcameraservice/tests/NV12Compressor.cpp b/services/camera/libcameraservice/tests/NV12Compressor.cpp
index b9f27fa..0a41a1f 100644
--- a/services/camera/libcameraservice/tests/NV12Compressor.cpp
+++ b/services/camera/libcameraservice/tests/NV12Compressor.cpp
@@ -315,7 +315,37 @@
return OK;
}
-status_t NV12Compressor::getExifOrientation(const unsigned char *jpegBuffer, size_t jpegBufferSize,
+status_t NV12Compressor::getJpegImageDimensions(uint8_t *jpegBuffer,
+ size_t jpegBufferSize, size_t *width /*out*/, size_t *height /*out*/) {
+ if ((jpegBuffer == nullptr) || (width == nullptr) || (height == nullptr) ||
+ (jpegBufferSize == 0u)) {
+ return BAD_VALUE;
+ }
+
+ // Scan JPEG buffer until Start of Frame
+ bool foundSOF = false;
+ size_t currentPos;
+ for (currentPos = 0; currentPos <= jpegBufferSize - kMarkerLength; currentPos++) {
+ if (checkStartOfFrame(jpegBuffer + currentPos)) {
+ foundSOF = true;
+ currentPos += kMarkerLength;
+ break;
+ }
+ }
+
+ if (!foundSOF) {
+ ALOGE("%s: Start of Frame not found", __func__);
+ return BAD_VALUE;
+ }
+
+ sof_t *startOfFrame = reinterpret_cast<sof_t *> (jpegBuffer + currentPos);
+ *width = ntohs(startOfFrame->width);
+ *height = ntohs(startOfFrame->height);
+
+ return OK;
+}
+
+status_t NV12Compressor::getExifOrientation(uint8_t *jpegBuffer, size_t jpegBufferSize,
ExifOrientation *exifValue /*out*/) {
if ((jpegBuffer == nullptr) || (exifValue == nullptr) || (jpegBufferSize == 0u)) {
return BAD_VALUE;
diff --git a/services/camera/libcameraservice/tests/NV12Compressor.h b/services/camera/libcameraservice/tests/NV12Compressor.h
index 92804c1..ee22d5e 100644
--- a/services/camera/libcameraservice/tests/NV12Compressor.h
+++ b/services/camera/libcameraservice/tests/NV12Compressor.h
@@ -48,13 +48,20 @@
*/
const std::vector<unsigned char>& getCompressedData() const;
+ // Utility methods
static android::status_t findJpegSize(uint8_t *jpegBuffer, size_t maxSize,
size_t *size /*out*/);
-
- static android::status_t getExifOrientation(const unsigned char *jpegBuffer,
+ static android::status_t getExifOrientation(uint8_t *jpegBuffer,
size_t jpegBufferSize, android::camera3::ExifOrientation *exifValue /*out*/);
+ /* Get the Jpeg image dimensions from the first Start of Frame marker. Note that, because
+ * the jpeg buffer is scanned sequentially, if the image contains a thumbnail the returned
+ * dimensions will be those of the thumbnail rather than of the main image.
+ */
+ static android::status_t getJpegImageDimensions(uint8_t *jpegBuffer, size_t jpegBufferSize,
+ size_t *width /*out*/, size_t *height /*out*/);
+
private:
struct DestinationManager : jpeg_destination_mgr {
@@ -79,14 +86,26 @@
static const uint8_t kMarker = 0xFF; // First byte of marker
static const uint8_t kStartOfImage = 0xD8; // Start of Image
static const uint8_t kEndOfImage = 0xD9; // End of Image
+ static const uint8_t kStartOfFrame = 0xC0; // Start of Frame
struct __attribute__((packed)) segment_t {
uint8_t marker[kMarkerLength];
uint16_t length;
};
+ struct __attribute__((packed)) sof_t {
+ uint16_t length;
+ uint8_t precision;
+ uint16_t height;
+ uint16_t width;
+ };
- // check for Start of Image marker
+ // check for Start of Frame marker
+ static bool checkStartOfFrame(uint8_t* buf) {
+ return buf[0] == kMarker && buf[1] == kStartOfFrame;
+ }
+
+ // check for Start of Image marker
static bool checkJpegStart(uint8_t* buf) {
return buf[0] == kMarker && buf[1] == kStartOfImage;
}