Merge "Use DesktopWindowing flag for Camera Compat for Freeform." into main
diff --git a/camera/camera_platform.aconfig b/camera/camera_platform.aconfig
index d28b67d..9687b83 100644
--- a/camera/camera_platform.aconfig
+++ b/camera/camera_platform.aconfig
@@ -35,13 +35,6 @@
flag {
namespace: "camera_platform"
- name: "session_hal_buf_manager"
- description: "Enable or disable HAL buffer manager as requested by the camera HAL"
- bug: "311263114"
-}
-
-flag {
- namespace: "camera_platform"
name: "inject_session_params"
description: "Enable session parameter injection via reconfiguration"
bug: "308984721"
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
index 874bde4..368dde0 100644
--- a/services/audiopolicy/service/SpatializerPoseController.cpp
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -22,6 +22,7 @@
#define LOG_TAG "SpatializerPoseController"
//#define LOG_NDEBUG 0
+#include <audio_utils/mutex.h>
#include <cutils/properties.h>
#include <sensor/Sensor.h>
#include <media/MediaMetricsItem.h>
@@ -131,20 +132,22 @@
Pose3f headToStage;
std::optional<HeadTrackingMode> modeIfChanged;
{
- std::unique_lock lock(mMutex);
- if (maxUpdatePeriod.has_value()) {
- mCondVar.wait_for(lock, maxUpdatePeriod.value(),
- [this] { return mShouldExit || mShouldCalculate; });
- } else {
- mCondVar.wait(lock, [this] { return mShouldExit || mShouldCalculate; });
+ audio_utils::unique_lock ul(mMutex);
+ while (true) {
+ if (mShouldExit) {
+ ALOGV("Exiting thread");
+ return;
+ }
+ if (mShouldCalculate) {
+ std::tie(headToStage, modeIfChanged) = calculate_l();
+ break;
+ }
+ if (maxUpdatePeriod.has_value()) {
+ mCondVar.wait_for(ul, maxUpdatePeriod.value());
+ } else {
+ mCondVar.wait(ul);
+ }
}
- if (mShouldExit) {
- ALOGV("Exiting thread");
- return;
- }
-
- // Calculate.
- std::tie(headToStage, modeIfChanged) = calculate_l();
}
// Invoke the callbacks outside the lock.
@@ -173,7 +176,7 @@
SpatializerPoseController::~SpatializerPoseController() {
{
- std::unique_lock lock(mMutex);
+ std::lock_guard lock(mMutex);
mShouldExit = true;
mCondVar.notify_all();
}
@@ -278,8 +281,10 @@
}
void SpatializerPoseController::waitUntilCalculated() {
- std::unique_lock lock(mMutex);
- mCondVar.wait(lock, [this] { return mCalculated; });
+ audio_utils::unique_lock ul(mMutex);
+ while (!mCalculated) {
+ mCondVar.wait(ul);
+ }
}
std::tuple<media::Pose3f, std::optional<media::HeadTrackingMode>>
@@ -358,14 +363,15 @@
}
}
-std::string SpatializerPoseController::toString(unsigned level) const {
+std::string SpatializerPoseController::toString(unsigned level) const NO_THREAD_SAFETY_ANALYSIS {
std::string prefixSpace(level, ' ');
std::string ss = prefixSpace + "SpatializerPoseController:\n";
bool needUnlock = false;
prefixSpace += ' ';
auto now = std::chrono::steady_clock::now();
- if (!mMutex.try_lock_until(now + media::kSpatializerDumpSysTimeOutInSecond)) {
+ if (!audio_utils::std_mutex_timed_lock(mMutex, std::chrono::nanoseconds(
+ media::kSpatializerDumpSysTimeOutInSecond).count())) {
ss.append(prefixSpace).append("try_lock failed, dumpsys may be INACCURATE!\n");
} else {
needUnlock = true;
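
A note on the loop rewrite above: the predicate overloads of condition_variable wait hide the mShouldExit/mShouldCalculate checks inside a lambda, while the explicit while loop keeps every read of that state as a plain access under the lock, which fits the GUARDED_BY annotations added in the header below (audio_utils::unique_lock is AOSP's annotated wrapper). A minimal self-contained sketch of the same wait-loop shape using only standard types; it is illustrative, not the actual class:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <optional>

    class PoseLoopSketch {
      public:
        void threadLoop(std::optional<std::chrono::microseconds> maxUpdatePeriod) {
            std::unique_lock lock(mMutex);
            while (true) {
                if (mShouldExit) {
                    return;                    // exit flag checked as a plain read under the lock
                }
                if (mShouldCalculate) {
                    mShouldCalculate = false;  // do the calculation here, then leave the loop
                    break;
                }
                if (maxUpdatePeriod.has_value()) {
                    mCondVar.wait_for(lock, maxUpdatePeriod.value());  // re-check on every wake-up
                } else {
                    mCondVar.wait(lock);
                }
            }
        }

      private:
        std::mutex mMutex;
        std::condition_variable mCondVar;
        bool mShouldExit = false;
        bool mShouldCalculate = true;
    };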
diff --git a/services/audiopolicy/service/SpatializerPoseController.h b/services/audiopolicy/service/SpatializerPoseController.h
index 7fa4f86..9955cd8 100644
--- a/services/audiopolicy/service/SpatializerPoseController.h
+++ b/services/audiopolicy/service/SpatializerPoseController.h
@@ -118,34 +118,34 @@
std::string toString(unsigned level) const;
private:
- mutable std::timed_mutex mMutex;
+ mutable std::mutex mMutex;
Listener* const mListener;
const std::chrono::microseconds mSensorPeriod;
- std::unique_ptr<media::HeadTrackingProcessor> mProcessor;
- int32_t mHeadSensor = media::SensorPoseProvider::INVALID_HANDLE;
- int32_t mScreenSensor = media::SensorPoseProvider::INVALID_HANDLE;
- std::optional<media::HeadTrackingMode> mActualMode;
- std::condition_variable_any mCondVar;
- bool mShouldCalculate = true;
- bool mShouldExit = false;
- bool mCalculated = false;
+ std::unique_ptr<media::HeadTrackingProcessor> mProcessor GUARDED_BY(mMutex);
+ int32_t mHeadSensor GUARDED_BY(mMutex) = media::SensorPoseProvider::INVALID_HANDLE;
+ int32_t mScreenSensor GUARDED_BY(mMutex) = media::SensorPoseProvider::INVALID_HANDLE;
+ std::optional<media::HeadTrackingMode> mActualMode GUARDED_BY(mMutex);
+ std::condition_variable mCondVar GUARDED_BY(mMutex);
+ bool mShouldCalculate GUARDED_BY(mMutex) = true;
+ bool mShouldExit GUARDED_BY(mMutex) = false;
+ bool mCalculated GUARDED_BY(mMutex) = false;
- media::VectorRecorder mHeadSensorRecorder{
+ media::VectorRecorder mHeadSensorRecorder GUARDED_BY(mMutex) {
8 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */,
{ 3, 6, 7 } /* delimiterIdx */};
- media::VectorRecorder mHeadSensorDurableRecorder{
+ media::VectorRecorder mHeadSensorDurableRecorder GUARDED_BY(mMutex) {
8 /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */,
{ 3, 6, 7 } /* delimiterIdx */};
- media::VectorRecorder mScreenSensorRecorder{
+ media::VectorRecorder mScreenSensorRecorder GUARDED_BY(mMutex) {
4 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */,
{ 3 } /* delimiterIdx */};
- media::VectorRecorder mScreenSensorDurableRecorder{
+ media::VectorRecorder mScreenSensorDurableRecorder GUARDED_BY(mMutex) {
4 /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */,
{ 3 } /* delimiterIdx */};
// Next-to-last variable, as releasing this stops the callbacks
- std::unique_ptr<media::SensorPoseProvider> mPoseProvider;
+ std::unique_ptr<media::SensorPoseProvider> mPoseProvider GUARDED_BY(mMutex);
// It's important that mThread is the last variable in this class
// since we start mThread in the initializer list
@@ -158,7 +158,8 @@
* Calculates the new outputs and updates internal state. Must be called with the lock held.
* Returns values that should be passed to the respective callbacks.
*/
- std::tuple<media::Pose3f, std::optional<media::HeadTrackingMode>> calculate_l();
+ std::tuple<media::Pose3f, std::optional<media::HeadTrackingMode>> calculate_l()
+ REQUIRES(mMutex);
};
} // namespace android
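
The GUARDED_BY and REQUIRES annotations added above (and NO_THREAD_SAFETY_ANALYSIS in the .cpp change) are Clang thread-safety attributes: members tagged GUARDED_BY may only be touched with the named mutex held, and REQUIRES places the same obligation on callers of calculate_l(). A minimal sketch of the pattern, assuming the macros come from a header such as android-base/thread_annotations.h (the header actually used by this code may differ, e.g. audio_utils/mutex.h):

    #include <mutex>

    #include <android-base/thread_annotations.h>  // assumed source of GUARDED_BY/REQUIRES

    class GuardedCounterSketch {
      public:
        void increment() {
            std::lock_guard lock(mMutex);
            incrementLocked();  // accepted when the lock type is annotated, as in AOSP libc++ builds
        }

      private:
        // Compile-time contract: every caller must hold mMutex.
        void incrementLocked() REQUIRES(mMutex) { ++mValue; }

        mutable std::mutex mMutex;
        int mValue GUARDED_BY(mMutex) = 0;  // unlocked access triggers a -Wthread-safety warning
    };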
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index bb54f25..5721745 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1648,7 +1648,7 @@
bool signalPipelineDrain = false;
if (!active &&
(mUseHalBufManager ||
- (flags::session_hal_buf_manager() && mHalBufManagedStreamIds.size() != 0))) {
+ (mHalBufManagedStreamIds.size() != 0))) {
auto streamIds = mOutputStreams.getStreamIds();
if (mStatus == STATUS_ACTIVE) {
mRequestThread->signalPipelineDrain(streamIds);
@@ -2598,25 +2598,23 @@
// It is possible that the HAL buffer manager behavior was changed by the
// configureStreams call.
mUseHalBufManager = config.use_hal_buf_manager;
- if (flags::session_hal_buf_manager()) {
- bool prevSessionHalBufManager = (mHalBufManagedStreamIds.size() != 0);
- // It is possible that configureStreams() changed config.hal_buffer_managed_streams
- mHalBufManagedStreamIds = config.hal_buffer_managed_streams;
+ bool prevSessionHalBufManager = (mHalBufManagedStreamIds.size() != 0);
+ // It is possible that configureStreams() changed config.hal_buffer_managed_streams
+ mHalBufManagedStreamIds = config.hal_buffer_managed_streams;
- bool thisSessionHalBufManager = mHalBufManagedStreamIds.size() != 0;
+ bool thisSessionHalBufManager = mHalBufManagedStreamIds.size() != 0;
- if (prevSessionHalBufManager && !thisSessionHalBufManager) {
- mRequestBufferSM.deInit();
- } else if (!prevSessionHalBufManager && thisSessionHalBufManager) {
- res = mRequestBufferSM.initialize(mStatusTracker);
- if (res != OK) {
- SET_ERR_L("%s: Camera %s: RequestBuffer State machine couldn't be initialized!",
- __FUNCTION__, mId.c_str());
- return res;
- }
+ if (prevSessionHalBufManager && !thisSessionHalBufManager) {
+ mRequestBufferSM.deInit();
+ } else if (!prevSessionHalBufManager && thisSessionHalBufManager) {
+ res = mRequestBufferSM.initialize(mStatusTracker);
+ if (res != OK) {
+ SET_ERR_L("%s: Camera %s: RequestBuffer State machine couldn't be initialized!",
+ __FUNCTION__, mId.c_str());
+ return res;
}
- mRequestThread->setHalBufferManagedStreams(mHalBufManagedStreamIds);
}
+ mRequestThread->setHalBufferManagedStreams(mHalBufManagedStreamIds);
// Finish all stream configuration immediately.
// TODO: Try to relax this later back to lazy completion, which should be
@@ -3021,8 +3019,7 @@
}
bool Camera3Device::HalInterface::isHalBufferManagedStream(int32_t streamId) const {
- return (mUseHalBufManager || (flags::session_hal_buf_manager() &&
- contains(mHalBufManagedStreamIds, streamId)));
+ return (mUseHalBufManager || contains(mHalBufManagedStreamIds, streamId));
}
status_t Camera3Device::HalInterface::popInflightBuffer(
@@ -4184,8 +4181,7 @@
}
}
bool passSurfaceMap =
- mUseHalBufManager ||
- (flags::session_hal_buf_manager() && containsHalBufferManagedStream);
+ mUseHalBufManager || containsHalBufferManagedStream;
auto expectedDurationInfo = calculateExpectedDurationRange(settings);
res = parent->registerInFlight(halRequest->frame_number,
totalNumBuffers, captureRequest->mResultExtras,
@@ -4301,7 +4297,7 @@
void Camera3Device::RequestThread::signalPipelineDrain(const std::vector<int>& streamIds) {
if (!mUseHalBufManager &&
- (flags::session_hal_buf_manager() && mHalBufManagedStreamIds.size() == 0)) {
+ (mHalBufManagedStreamIds.size() == 0)) {
ALOGE("%s called for camera device not supporting HAL buffer management", __FUNCTION__);
return;
}
@@ -4459,8 +4455,7 @@
Camera3Stream *stream = Camera3Stream::cast((*outputBuffers)[i].stream);
int32_t streamId = stream->getId();
bool skipBufferForStream =
- mUseHalBufManager || (flags::session_hal_buf_manager() &&
- contains(mHalBufManagedStreamIds, streamId));
+ mUseHalBufManager || (contains(mHalBufManagedStreamIds, streamId));
if (skipBufferForStream) {
// No output buffer can be returned when using HAL buffer manager for its stream
continue;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 31707ec..62226e1 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -894,8 +894,7 @@
if (outputBuffers[i].buffer == nullptr) {
if (!useHalBufManager &&
- !(flags::session_hal_buf_manager() &&
- contains(halBufferManagedStreams, streamId))) {
+ !contains(halBufferManagedStreams, streamId)) {
// With the HAL buffer management API, the HAL sometimes has to return buffers that
// do not yet have an output buffer handle filled in. This is, however, illegal if the
// HAL buffer management API is not being used.
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h b/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h
index aca7a67..2d75d03 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h
@@ -212,8 +212,7 @@
bool noBufferReturned = false;
buffer_handle_t *buffer = nullptr;
if (states.useHalBufManager ||
- (flags::session_hal_buf_manager() &&
- contains(states.halBufManagedStreamIds, bSrc.streamId))) {
+ contains(states.halBufManagedStreamIds, bSrc.streamId)) {
// This is suspicious most of the time but can be correct during flush, where the HAL
// has to return a capture result before a buffer is requested
if (bSrc.bufferId == BUFFER_ID_NO_BUFFER) {
@@ -303,8 +302,7 @@
for (const auto& buf : buffers) {
if (!states.useHalBufManager &&
- !(flags::session_hal_buf_manager() &&
- contains(states.halBufManagedStreamIds, buf.streamId))) {
+ !contains(states.halBufManagedStreamIds, buf.streamId)) {
ALOGE("%s: Camera %s does not support HAL buffer management for stream id %d",
__FUNCTION__, states.cameraId.c_str(), buf.streamId);
return;
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
index 868b7ef..e52e9a2 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
@@ -918,12 +918,6 @@
camera3::camera_stream_t *src = config->streams[i];
Camera3Stream* cam3stream = Camera3Stream::cast(src);
- // For stream configurations with multi res streams, hal buffer manager has to be used.
- if (!flags::session_hal_buf_manager() && cam3stream->getHalStreamGroupId() != -1 &&
- src->stream_type != CAMERA_STREAM_INPUT) {
- mUseHalBufManager = true;
- config->use_hal_buf_manager = true;
- }
cam3stream->setBufferFreedListener(this);
int streamId = cam3stream->getId();
StreamType streamType;
@@ -1002,8 +996,7 @@
err.getMessage());
return AidlProviderInfo::mapToStatusT(err);
}
- if (flags::session_hal_buf_manager() && interfaceVersion >= AIDL_DEVICE_SESSION_V3
- && mSupportSessionHalBufManager) {
+ if (interfaceVersion >= AIDL_DEVICE_SESSION_V3 && mSupportSessionHalBufManager) {
err = mAidlSession->configureStreamsV2(requestedConfiguration, &configureStreamsRet);
finalConfiguration = std::move(configureStreamsRet.halStreams);
} else {
@@ -1015,18 +1008,16 @@
return AidlProviderInfo::mapToStatusT(err);
}
- if (flags::session_hal_buf_manager()) {
- std::set<int32_t> halBufferManagedStreamIds;
- for (const auto &halStream: finalConfiguration) {
- if ((interfaceVersion >= AIDL_DEVICE_SESSION_V3 &&
- mSupportSessionHalBufManager && halStream.enableHalBufferManager)
- || mUseHalBufManager) {
- halBufferManagedStreamIds.insert(halStream.id);
- }
+ std::set<int32_t> halBufferManagedStreamIds;
+ for (const auto &halStream: finalConfiguration) {
+ if ((interfaceVersion >= AIDL_DEVICE_SESSION_V3 &&
+ mSupportSessionHalBufManager && halStream.enableHalBufferManager)
+ || mUseHalBufManager) {
+ halBufferManagedStreamIds.insert(halStream.id);
}
- mHalBufManagedStreamIds = std::move(halBufferManagedStreamIds);
- config->hal_buffer_managed_streams = mHalBufManagedStreamIds;
}
+ mHalBufManagedStreamIds = std::move(halBufferManagedStreamIds);
+ config->hal_buffer_managed_streams = mHalBufManagedStreamIds;
// And convert output stream configuration from AIDL
for (size_t i = 0; i < config->num_streams; i++) {
camera3::camera_stream_t *dst = config->streams[i];
@@ -1096,10 +1087,8 @@
}
dstStream->setUsage(
mapProducerToFrameworkUsage(src.producerUsage));
- if (flags::session_hal_buf_manager()) {
- dstStream->setHalBufferManager(
- contains(config->hal_buffer_managed_streams, streamId));
- }
+ dstStream->setHalBufferManager(
+ contains(config->hal_buffer_managed_streams, streamId));
}
dst->max_buffers = src.maxBuffers;
}
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
index f507df9..6986d3c 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
@@ -929,7 +929,7 @@
switch (src->stream_type) {
case CAMERA_STREAM_OUTPUT:
streamType = StreamType::OUTPUT;
- if (flags::session_hal_buf_manager() && mUseHalBufManager) {
+ if (mUseHalBufManager) {
mHalBufManagedStreamIds.insert(streamId);
}
break;
diff --git a/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp b/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp
index 5c5e177..0d445eb 100644
--- a/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp
+++ b/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp
@@ -23,12 +23,13 @@
using namespace android;
using namespace android::camera3;
-static const float kMinRatio = 0.1f;
-static const float kMaxRatio = 0.9f;
-
static const uint8_t kTotalDepthJpegBufferCount = 3;
static const uint8_t kIntrinsicCalibrationSize = 5;
static const uint8_t kLensDistortionSize = 5;
+static const uint8_t kDqtSize = 5;
+
+static const uint16_t kMinDimension = 2;
+static const uint16_t kMaxDimension = 1024;
static const DepthPhotoOrientation kDepthPhotoOrientations[] = {
DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES,
@@ -45,40 +46,97 @@
}
}
+void fillRandomBufferData(std::vector<unsigned char>& buffer, size_t bytes,
+ FuzzedDataProvider& fdp) {
+ while (bytes--) {
+ buffer.push_back(fdp.ConsumeIntegral<uint8_t>());
+ }
+}
+
+void addMarkersInJpegBuffer(std::vector<uint8_t>& Buffer, size_t& height, size_t& width,
+ FuzzedDataProvider& fdp) {
+ /* Add the SOI Marker */
+ Buffer.push_back(0xFF);
+ Buffer.push_back(0xD8);
+
+ /* Add the JFIF Header */
+ const uint8_t header[] = {0xFF, 0xE0, 0x00, 0x10, 0x4A, 0x46, 0x49, 0x46, 0x00,
+ 0x01, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00};
+ Buffer.insert(Buffer.end(), header, header + sizeof(header));
+
+ /* Add the SOF Marker */
+ Buffer.push_back(0xFF);
+ Buffer.push_back(0xC0);
+
+ Buffer.push_back(0x00); // Length high byte
+ Buffer.push_back(0x11); // Length low byte
+
+ Buffer.push_back(fdp.ConsumeIntegral<uint8_t>()); // Random precision
+
+ height = fdp.ConsumeIntegralInRange<uint16_t>(kMinDimension, kMaxDimension); // Image height
+ Buffer.push_back((height & 0xFF00) >> 8);
+ Buffer.push_back(height & 0x00FF);
+
+ width = fdp.ConsumeIntegralInRange<uint16_t>(kMinDimension, kMaxDimension); // Image width
+ Buffer.push_back((width & 0xFF00) >> 8);
+ Buffer.push_back(width & 0x00FF);
+
+ Buffer.push_back(0x03); // Number of components (3 for Y, Cb, Cr)
+
+ /* Add DQT (Define Quantization Table) Marker */
+ Buffer.push_back(0xFF);
+ Buffer.push_back(0xDB);
+
+ Buffer.push_back(0x00); // Length high byte
+ Buffer.push_back(0x43); // Length low byte
+
+ Buffer.push_back(0x00); // Precision and table identifier
+
+ fillRandomBufferData(Buffer, kDqtSize, fdp); // Random DQT data
+
+ /* Add the Component Data */
+ unsigned char componentData[] = {0x01, 0x21, 0x00, 0x02, 0x11, 0x01, 0x03, 0x11, 0x01};
+ Buffer.insert(Buffer.end(), componentData, componentData + sizeof(componentData));
+
+ /* Add the DHT (Define Huffman Table) Marker */
+ Buffer.push_back(0xFF);
+ Buffer.push_back(0xC4);
+ Buffer.push_back(0x00); // Length high byte
+ Buffer.push_back(0x1F); // Length low byte
+
+ Buffer.push_back(0x00); // Table class and identifier
+ fillRandomBufferData(Buffer, 16, fdp); // 16 codes for lengths
+ fillRandomBufferData(Buffer, 12, fdp); // Values
+
+ /* Add the SOS (Start of Scan) Marker */
+ Buffer.push_back(0xFF);
+ Buffer.push_back(0xDA);
+ Buffer.push_back(0x00); // Length high byte
+ Buffer.push_back(0x0C); // Length low byte
+
+ Buffer.push_back(0x03); // Number of components (3 for Y, Cb, Cr)
+ unsigned char sosComponentData[] = {0x01, 0x00, 0x02, 0x11, 0x03, 0x11};
+ Buffer.insert(Buffer.end(), sosComponentData, sosComponentData + sizeof(sosComponentData));
+
+ Buffer.push_back(0x00); // Spectral selection start
+ Buffer.push_back(0x3F); // Spectral selection end
+ Buffer.push_back(0x00); // Successive approximation
+
+ size_t remainingBytes = (256 * 1024) - Buffer.size() - 2; // Subtract 2 for EOI marker
+ fillRandomBufferData(Buffer, remainingBytes, fdp);
+
+ /* Add the EOI Marker */
+ Buffer.push_back(0xFF);
+ Buffer.push_back(0xD9);
+}
+
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
FuzzedDataProvider fdp(data, size);
DepthPhotoInputFrame inputFrame;
- /**
- * Consuming 80% of the data to set mMainJpegBuffer. This ensures that we
- * don't completely exhaust data and use the rest 20% for fuzzing of APIs.
- */
- std::vector<uint8_t> buffer = fdp.ConsumeBytes<uint8_t>((size * 80) / 100);
- inputFrame.mMainJpegBuffer = reinterpret_cast<const char*>(buffer.data());
-
- /**
- * Calculate height and width based on buffer size and a ratio within [0.1, 0.9].
- * The ratio adjusts the dimensions while maintaining a relationship to the total buffer size.
- */
- const float ratio = fdp.ConsumeFloatingPointInRange<float>(kMinRatio, kMaxRatio);
- const size_t height = std::sqrt(buffer.size()) * ratio;
- const size_t width = std::sqrt(buffer.size()) / ratio;
-
- inputFrame.mMainJpegHeight = height;
- inputFrame.mMainJpegWidth = width;
- inputFrame.mMainJpegSize = buffer.size();
- // Worst case both depth and confidence maps have the same size as the main color image.
- inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * kTotalDepthJpegBufferCount;
-
- std::vector<uint16_t> depth16Buffer(height * width);
- generateDepth16Buffer(&depth16Buffer, height * width, fdp);
- inputFrame.mDepthMapBuffer = depth16Buffer.data();
- inputFrame.mDepthMapHeight = height;
- inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = width;
-
inputFrame.mIsLogical = fdp.ConsumeBool();
-
+ inputFrame.mJpegQuality = fdp.ConsumeProbability<float>() * 100;
inputFrame.mOrientation = fdp.PickValueInArray<DepthPhotoOrientation>(kDepthPhotoOrientations);
if (fdp.ConsumeBool()) {
@@ -95,6 +153,23 @@
inputFrame.mIsLensDistortionValid = 1;
}
+ std::vector<uint8_t> Buffer;
+ size_t height, width;
+ addMarkersInJpegBuffer(Buffer, height, width, fdp);
+ inputFrame.mMainJpegBuffer = reinterpret_cast<const char*>(Buffer.data());
+
+ inputFrame.mMainJpegHeight = height;
+ inputFrame.mMainJpegWidth = width;
+ inputFrame.mMainJpegSize = Buffer.size();
+ // Worst case both depth and confidence maps have the same size as the main color image.
+ inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * kTotalDepthJpegBufferCount;
+
+ std::vector<uint16_t> depth16Buffer(height * width);
+ generateDepth16Buffer(&depth16Buffer, height * width, fdp);
+ inputFrame.mDepthMapBuffer = depth16Buffer.data();
+ inputFrame.mDepthMapHeight = height;
+ inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = width;
+
std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
size_t actualDepthPhotoSize = 0;
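
For reference, the buffer built by addMarkersInJpegBuffer carries its dimensions in the SOF0 segment (marker bytes 0xFF 0xC0, followed by a two-byte length, one precision byte, then big-endian height and width), so the JPEG consumer sees the same dimensions used to size the depth map above. A small illustrative reader, not part of this change, that recovers those dimensions from such a buffer; a naive marker scan is enough here because SOF0 appears before any fuzzer-generated filler, and this is not a general-purpose JPEG parser:

    #include <cstddef>
    #include <cstdint>
    #include <optional>
    #include <utility>
    #include <vector>

    // Illustrative only: extracts {height, width} from the SOF0 segment written by
    // addMarkersInJpegBuffer. The layout after the 0xFF 0xC0 marker is
    // length(2) precision(1) height(2) width(2), all big-endian.
    std::optional<std::pair<uint16_t, uint16_t>> readSof0Dimensions(
            const std::vector<uint8_t>& jpeg) {
        for (size_t i = 0; i + 8 < jpeg.size(); ++i) {
            if (jpeg[i] == 0xFF && jpeg[i + 1] == 0xC0) {
                uint16_t height = static_cast<uint16_t>((jpeg[i + 5] << 8) | jpeg[i + 6]);
                uint16_t width = static_cast<uint16_t>((jpeg[i + 7] << 8) | jpeg[i + 8]);
                return std::make_pair(height, width);
            }
        }
        return std::nullopt;  // no SOF0 marker found
    }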