/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Camera3-DepthCompositeStream"
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0

#include "api1/client2/JpegProcessor.h"
#include "common/CameraProviderManager.h"

#include <dynamic_depth/camera.h>
#include <dynamic_depth/cameras.h>
#include <dynamic_depth/container.h>
#include <dynamic_depth/device.h>
#include <dynamic_depth/dimension.h>
#include <dynamic_depth/dynamic_depth.h>
#include <dynamic_depth/point.h>
#include <dynamic_depth/pose.h>
#include <dynamic_depth/profile.h>
#include <dynamic_depth/profiles.h>
#include <xmpmeta/xmp_data.h>
#include <xmpmeta/xmp_writer.h>

#include <jpeglib.h>
#include <math.h>

#include <gui/Surface.h>
#include <utils/Log.h>
#include <utils/Trace.h>

#include "DepthCompositeStream.h"

using dynamic_depth::Camera;
using dynamic_depth::Cameras;
using dynamic_depth::CameraParams;
using dynamic_depth::Container;
using dynamic_depth::DepthFormat;
using dynamic_depth::DepthMapParams;
using dynamic_depth::DepthUnits;
using dynamic_depth::Device;
using dynamic_depth::DeviceParams;
using dynamic_depth::Dimension;
using dynamic_depth::Image;
using dynamic_depth::ImagingModelParams;
using dynamic_depth::Pose;
using dynamic_depth::Profile;
using dynamic_depth::Profiles;

namespace android {
namespace camera3 {

DepthCompositeStream::DepthCompositeStream(wp<CameraDeviceBase> device,
        wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
        CompositeStream(device, cb),
        mBlobStreamId(-1),
        mBlobSurfaceId(-1),
        mDepthStreamId(-1),
        mDepthSurfaceId(-1),
        mBlobWidth(0),
        mBlobHeight(0),
        mDepthBufferAcquired(false),
        mBlobBufferAcquired(false),
        mProducerListener(new ProducerListener()),
        mMaxJpegSize(-1),
        mIsLogicalCamera(false) {
    sp<CameraDeviceBase> cameraDevice = device.promote();
    if (cameraDevice.get() != nullptr) {
        CameraMetadata staticInfo = cameraDevice->info();
        auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
        if (entry.count > 0) {
            mMaxJpegSize = entry.data.i32[0];
        } else {
            ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
        }

        entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
        if (entry.count == 5) {
            mInstrinsicCalibration.reserve(5);
            mInstrinsicCalibration.insert(mInstrinsicCalibration.end(), entry.data.f,
                    entry.data.f + 5);
        } else {
            ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
        }

        entry = staticInfo.find(ANDROID_LENS_DISTORTION);
        if (entry.count == 5) {
            mLensDistortion.reserve(5);
            mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
        } else {
            ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
        }

        entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
        for (size_t i = 0; i < entry.count; ++i) {
            uint8_t capability = entry.data.u8[i];
            if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
                mIsLogicalCamera = true;
                break;
            }
        }

        getSupportedDepthSizes(staticInfo, &mSupportedDepthSizes);
    }
}

DepthCompositeStream::~DepthCompositeStream() {
    mBlobConsumer.clear();
    mBlobSurface.clear();
    mBlobStreamId = -1;
    mBlobSurfaceId = -1;
    mDepthConsumer.clear();
    mDepthSurface.clear();
    mDepthConsumer = nullptr;
    mDepthSurface = nullptr;
}

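// Drains the newly arrived jpeg/depth buffers and capture results and files them
// in 'mPendingInputFrames', keyed by sensor timestamp. Frame numbers flagged as
// failing are marked on the corresponding pending entry so the frame can be
// released and the client notified later.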
void DepthCompositeStream::compilePendingInputLocked() {
    CpuConsumer::LockedBuffer imgBuffer;

    while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
        auto it = mInputJpegBuffers.begin();
        auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputJpegBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting jpeg buffer with timestamp: %" PRId64 " received buffer with "
                    "timestamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mBlobConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
            mBlobBufferAcquired = true;
        }
        mInputJpegBuffers.erase(it);
    }

    while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
        auto it = mInputDepthBuffers.begin();
        auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputDepthBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting depth buffer with timestamp: %" PRId64 " received buffer with "
                    "timestamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mDepthConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
            mDepthBufferAcquired = true;
        }
        mInputDepthBuffers.erase(it);
    }

    while (!mCaptureResults.empty()) {
        auto it = mCaptureResults.begin();
        // Negative timestamp indicates that something went wrong during the capture result
        // collection process.
        if (it->first >= 0) {
            mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
            mPendingInputFrames[it->first].result = std::get<1>(it->second);
        }
        mCaptureResults.erase(it);
    }

    while (!mFrameNumberMap.empty()) {
        auto it = mFrameNumberMap.begin();
        mPendingInputFrames[it->second].frameNumber = it->first;
        mFrameNumberMap.erase(it);
    }

    auto it = mErrorFrameNumbers.begin();
    while (it != mErrorFrameNumbers.end()) {
        bool frameFound = false;
        for (auto &inputFrame : mPendingInputFrames) {
            if (inputFrame.second.frameNumber == *it) {
                inputFrame.second.error = true;
                frameFound = true;
                break;
            }
        }

        if (frameFound) {
            it = mErrorFrameNumbers.erase(it);
        } else {
            ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
                    *it);
            it++;
        }
    }
}

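// Scans the pending inputs for the oldest frame that has both the jpeg and the
// depth buffer present and no pending error. Returns true and updates '*currentTs'
// on success; callers are expected to initialize '*currentTs' to INT64_MAX.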
bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) {
    if (currentTs == nullptr) {
        return false;
    }

    bool newInputAvailable = false;
    for (const auto& it : mPendingInputFrames) {
        if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) &&
                (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) {
            *currentTs = it.first;
            newInputAvailable = true;
        }
    }

    return newInputAvailable;
}

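// Counterpart of 'getNextReadyInputLocked' for failures: finds the oldest pending
// frame that is flagged as failed but not yet notified. Returns its frame number,
// or -1 if no such frame exists.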
int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) {
    int64_t ret = -1;
    if (currentTs == nullptr) {
        return ret;
    }

    for (const auto& it : mPendingInputFrames) {
        if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
            *currentTs = it.first;
            ret = it.second.frameNumber;
        }
    }

    return ret;
}

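// Compresses a single 8-bit grayscale plane of 'width' x 'height' from 'in' into
// the pre-allocated buffer 'out' using libjpeg; the encoded length is returned
// through 'actualSize'. Used below for both the quantized depth map and the
// confidence map.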
status_t DepthCompositeStream::encodeGrayscaleJpeg(size_t width, size_t height, uint8_t *in,
        void *out, const size_t maxOutSize, uint8_t jpegQuality, size_t &actualSize) {
    status_t ret;
    // libjpeg is a C library so we use C-style "inheritance" by
    // putting libjpeg's jpeg_destination_mgr first in our custom
    // struct. This allows us to cast jpeg_destination_mgr* to
    // CustomJpegDestMgr* when we get it passed to us in a callback.
    struct CustomJpegDestMgr : public jpeg_destination_mgr {
        JOCTET *mBuffer;
        size_t mBufferSize;
        size_t mEncodedSize;
        bool mSuccess;
    } dmgr;

    jpeg_compress_struct cinfo = {};
    jpeg_error_mgr jerr;

    // Initialize error handling with standard callbacks, but
    // then override output_message (to print to ALOG) and
    // error_exit to set a flag and print a message instead
    // of killing the whole process.
    cinfo.err = jpeg_std_error(&jerr);

    cinfo.err->output_message = [](j_common_ptr cinfo) {
        char buffer[JMSG_LENGTH_MAX];

        /* Create the message */
        (*cinfo->err->format_message)(cinfo, buffer);
        ALOGE("libjpeg error: %s", buffer);
    };

    cinfo.err->error_exit = [](j_common_ptr cinfo) {
        (*cinfo->err->output_message)(cinfo);
        if (cinfo->client_data) {
            auto & dmgr = *static_cast<CustomJpegDestMgr*>(cinfo->client_data);
            dmgr.mSuccess = false;
        }
    };

    // Now that we initialized some callbacks, let's create our compressor
    jpeg_create_compress(&cinfo);
    dmgr.mBuffer = static_cast<JOCTET*>(out);
    dmgr.mBufferSize = maxOutSize;
    dmgr.mEncodedSize = 0;
    dmgr.mSuccess = true;
    cinfo.client_data = static_cast<void*>(&dmgr);

    // These lambdas become C-style function pointers and as per C++11 spec
    // may not capture anything.
    dmgr.init_destination = [](j_compress_ptr cinfo) {
        auto & dmgr = static_cast<CustomJpegDestMgr&>(*cinfo->dest);
        dmgr.next_output_byte = dmgr.mBuffer;
        dmgr.free_in_buffer = dmgr.mBufferSize;
        ALOGV("%s:%d jpeg start: %p [%zu]",
                __FUNCTION__, __LINE__, dmgr.mBuffer, dmgr.mBufferSize);
    };

    dmgr.empty_output_buffer = [](j_compress_ptr cinfo __unused) {
        ALOGV("%s:%d Out of buffer", __FUNCTION__, __LINE__);
        return 0;
    };

    dmgr.term_destination = [](j_compress_ptr cinfo) {
        auto & dmgr = static_cast<CustomJpegDestMgr&>(*cinfo->dest);
        dmgr.mEncodedSize = dmgr.mBufferSize - dmgr.free_in_buffer;
        ALOGV("%s:%d Done with jpeg: %zu", __FUNCTION__, __LINE__, dmgr.mEncodedSize);
    };
    cinfo.dest = reinterpret_cast<struct jpeg_destination_mgr*>(&dmgr);
    cinfo.image_width = width;
    cinfo.image_height = height;
    cinfo.input_components = 1;
    cinfo.in_color_space = JCS_GRAYSCALE;

    // Initialize defaults and then override what we want
    jpeg_set_defaults(&cinfo);

    jpeg_set_quality(&cinfo, jpegQuality, 1);
    jpeg_set_colorspace(&cinfo, JCS_GRAYSCALE);
    cinfo.raw_data_in = 0;
    cinfo.dct_method = JDCT_IFAST;

    cinfo.comp_info[0].h_samp_factor = 1;
    cinfo.comp_info[1].h_samp_factor = 1;
    cinfo.comp_info[2].h_samp_factor = 1;
    cinfo.comp_info[0].v_samp_factor = 1;
    cinfo.comp_info[1].v_samp_factor = 1;
    cinfo.comp_info[2].v_samp_factor = 1;

    jpeg_start_compress(&cinfo, TRUE);

    for (size_t i = 0; i < cinfo.image_height; i++) {
        auto currentRow = static_cast<JSAMPROW>(in + i * width);
        jpeg_write_scanlines(&cinfo, &currentRow, /*num_lines*/ 1);
    }

    jpeg_finish_compress(&cinfo);

    actualSize = dmgr.mEncodedSize;
    if (dmgr.mSuccess) {
        ret = NO_ERROR;
    } else {
        ret = UNKNOWN_ERROR;
    }

    return ret;
}

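// Converts one DEPTH16 buffer into a Dynamic Depth 'DepthMap': the range and
// confidence samples are unpacked, quantized to 8 bits and JPEG-compressed into
// the depth and confidence images referenced by the XMP metadata.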
std::unique_ptr<DepthMap> DepthCompositeStream::processDepthMapFrame(
        const CpuConsumer::LockedBuffer &depthMapBuffer, size_t maxJpegSize, uint8_t jpegQuality,
        std::vector<std::unique_ptr<Item>> *items /*out*/) {
    std::vector<float> points, confidence;

    size_t pointCount = depthMapBuffer.width * depthMapBuffer.height;
    points.reserve(pointCount);
    confidence.reserve(pointCount);
    float near = UINT16_MAX;
    float far = .0f;
    uint16_t *data = reinterpret_cast<uint16_t *> (depthMapBuffer.data);
    for (size_t i = 0; i < depthMapBuffer.height; i++) {
        for (size_t j = 0; j < depthMapBuffer.width; j++) {
            // Android densely packed depth map. The units for the range are in
            // millimeters and need to be scaled to meters.
            // The confidence value is encoded in the 3 most significant bits.
            // The confidence data needs to be additionally normalized with
            // values 1.0f, 0.0f representing maximum and minimum confidence
            // respectively.
            auto value = data[i * depthMapBuffer.stride + j];
            auto point = static_cast<float>(value & 0x1FFF) / 1000.f;
            points.push_back(point);

            auto conf = (value >> 13) & 0x7;
            float normConfidence = (conf == 0) ? 1.f : (static_cast<float>(conf) - 1) / 7.f;
            confidence.push_back(normConfidence);

            if (near > point) {
                near = point;
            }
            if (far < point) {
                far = point;
            }
        }
    }

    if (near == far) {
        ALOGE("%s: Near and far range values must not match!", __FUNCTION__);
        return nullptr;
    }

    std::vector<uint8_t> pointsQuantized, confidenceQuantized;
    pointsQuantized.reserve(pointCount); confidenceQuantized.reserve(pointCount);
    auto pointIt = points.begin();
    auto confidenceIt = confidence.begin();
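    // Quantize both maps to 8 bits. The depth values use the Dynamic Depth
    // "RangeInverse" encoding selected below via 'DepthFormat::kRangeInverse':
    // a depth d in [near, far] maps to n = (far * (d - near)) / (d * (far - near)),
    // scaled to [0, 255]. This allocates more of the code space to points closer
    // to the camera.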
    while ((pointIt != points.end()) && (confidenceIt != confidence.end())) {
        pointsQuantized.push_back(floorf(((far * (*pointIt - near)) /
                (*pointIt * (far - near))) * 255.0f));
        confidenceQuantized.push_back(floorf(*confidenceIt * 255.0f));
        confidenceIt++; pointIt++;
    }

    DepthMapParams depthParams(DepthFormat::kRangeInverse, near, far, DepthUnits::kMeters,
            "android/depthmap");
    depthParams.confidence_uri = "android/confidencemap";
    depthParams.mime = "image/jpeg";
    depthParams.depth_image_data.resize(maxJpegSize);
    depthParams.confidence_data.resize(maxJpegSize);
    size_t actualJpegSize;
    auto ret = encodeGrayscaleJpeg(depthMapBuffer.width, depthMapBuffer.height,
            pointsQuantized.data(), depthParams.depth_image_data.data(), maxJpegSize, jpegQuality,
            actualJpegSize);
    if (ret != NO_ERROR) {
        ALOGE("%s: Depth map compression failed!", __FUNCTION__);
        return nullptr;
    }
    depthParams.depth_image_data.resize(actualJpegSize);

    ret = encodeGrayscaleJpeg(depthMapBuffer.width, depthMapBuffer.height,
            confidenceQuantized.data(), depthParams.confidence_data.data(), maxJpegSize,
            jpegQuality, actualJpegSize);
    if (ret != NO_ERROR) {
        ALOGE("%s: Confidence map compression failed!", __FUNCTION__);
        return nullptr;
    }
    depthParams.confidence_data.resize(actualJpegSize);

    return DepthMap::FromData(depthParams, items);
}

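// Assembles the final depth photo for one input frame: locates the main jpeg
// payload, builds the Dynamic Depth device/camera metadata (including the
// processed depth map and imaging model), serializes everything into a new jpeg
// via 'WriteImageAndMetadataAndContainer' and queues the result on the
// client-facing blob output surface.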
status_t DepthCompositeStream::processInputFrame(const InputFrame &inputFrame) {
    status_t res;
    sp<ANativeWindow> outputANW = mOutputSurface;
    ANativeWindowBuffer *anb;
    int fenceFd;
    void *dstBuffer;
    auto imgBuffer = inputFrame.jpegBuffer;

    auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
            inputFrame.jpegBuffer.width);
    if (jpegSize == 0) {
        ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
        jpegSize = inputFrame.jpegBuffer.width;
    }

    std::vector<std::unique_ptr<Item>> items;
    std::vector<std::unique_ptr<Camera>> cameraList;
    auto image = Image::FromDataForPrimaryImage("android/mainimage", &items);
    std::unique_ptr<CameraParams> cameraParams(new CameraParams(std::move(image)));
    if (cameraParams == nullptr) {
        ALOGE("%s: Failed to initialize camera parameters", __FUNCTION__);
        return BAD_VALUE;
    }

    size_t maxDepthJpegSize;
    if (mMaxJpegSize > 0) {
        maxDepthJpegSize = mMaxJpegSize;
    } else {
        maxDepthJpegSize = std::max<size_t> (jpegSize,
                inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
    }
    uint8_t jpegQuality = 100;
    auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
    if (entry.count > 0) {
        jpegQuality = entry.data.u8[0];
    }
    cameraParams->depth_map = processDepthMapFrame(inputFrame.depthBuffer, maxDepthJpegSize,
            jpegQuality, &items);
    if (cameraParams->depth_map == nullptr) {
        ALOGE("%s: Depth map processing failed!", __FUNCTION__);
        return BAD_VALUE;
    }
    cameraParams->imaging_model = getImagingModel();

    if (mIsLogicalCamera) {
        cameraParams->trait = dynamic_depth::CameraTrait::LOGICAL;
    } else {
        cameraParams->trait = dynamic_depth::CameraTrait::PHYSICAL;
    }

    cameraList.emplace_back(Camera::FromData(std::move(cameraParams)));

    auto deviceParams = std::make_unique<DeviceParams> (Cameras::FromCameraArray(&cameraList));
    deviceParams->container = Container::FromItems(&items);
    std::vector<std::unique_ptr<Profile>> profileList;
    profileList.emplace_back(Profile::FromData("DepthPhoto", {0}));
    deviceParams->profiles = Profiles::FromProfileArray(&profileList);
    std::unique_ptr<Device> device = Device::FromData(std::move(deviceParams));
    if (device == nullptr) {
        ALOGE("%s: Failed to initialize camera device", __FUNCTION__);
        return BAD_VALUE;
    }

    std::istringstream inputJpegStream(std::string(reinterpret_cast<const char *> (imgBuffer.data),
            jpegSize));
    std::ostringstream outputJpegStream;
    if (!WriteImageAndMetadataAndContainer(&inputJpegStream, device.get(), &outputJpegStream)) {
        ALOGE("%s: Failed writing depth output", __FUNCTION__);
        return BAD_VALUE;
    }

    size_t finalJpegSize = static_cast<size_t> (outputJpegStream.tellp()) +
            sizeof(struct camera3_jpeg_blob);

    ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
    if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegSize, 1))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer dimensions"
                " %zux%u for stream %d", __FUNCTION__, finalJpegSize, 1U, mBlobStreamId);
        return res;
    }

    res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
    if (res != OK) {
        ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
                res);
        return res;
    }

    sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
    res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
    if (res != OK) {
        ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
                strerror(-res), res);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return res;
    }

    if ((gb->getWidth() < finalJpegSize) || (gb->getHeight() != 1)) {
        ALOGE("%s: Blob buffer size mismatch, expected %zux%u received %ux%u", __FUNCTION__,
                finalJpegSize, 1U, gb->getWidth(), gb->getHeight());
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return BAD_VALUE;
    }

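    // BLOB buffers use the standard camera blob transport: the jpeg payload starts
    // at offset 0 and a 'camera3_jpeg_blob' footer is written at the very end of
    // the buffer so consumers can recover the actual encoded size.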
    // Copy final jpeg with embedded depth data in the composite stream output buffer
    uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
            (gb->getWidth() - sizeof(struct camera3_jpeg_blob));
    struct camera3_jpeg_blob *blob = reinterpret_cast<struct camera3_jpeg_blob*> (header);
    blob->jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
    blob->jpeg_size = static_cast<uint32_t> (outputJpegStream.tellp());
    memcpy(dstBuffer, outputJpegStream.str().c_str(), blob->jpeg_size);
    outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);

    return res;
}

void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
    if (inputFrame == nullptr) {
        return;
    }

    if (inputFrame->depthBuffer.data != nullptr) {
        mDepthConsumer->unlockBuffer(inputFrame->depthBuffer);
        inputFrame->depthBuffer.data = nullptr;
        mDepthBufferAcquired = false;
    }

    if (inputFrame->jpegBuffer.data != nullptr) {
        mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
        inputFrame->jpegBuffer.data = nullptr;
        mBlobBufferAcquired = false;
    }

    if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
        notifyError(inputFrame->frameNumber);
        inputFrame->errorNotified = true;
    }
}

void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
    auto it = mPendingInputFrames.begin();
    while (it != mPendingInputFrames.end()) {
        if (it->first <= currentTs) {
            releaseInputFrameLocked(&it->second);
            it = mPendingInputFrames.erase(it);
        } else {
            it++;
        }
    }
}

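// Main processing loop: waits until some pending frame has both input buffers,
// processes one frame per iteration and then releases every input with an equal
// or older timestamp. Returns false to terminate the thread once an
// unrecoverable error is hit.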
bool DepthCompositeStream::threadLoop() {
    int64_t currentTs = INT64_MAX;
    bool newInputAvailable = false;

    {
        Mutex::Autolock l(mMutex);

        if (mErrorState) {
            // In case we landed in error state, return any pending buffers and
            // halt all further processing.
            compilePendingInputLocked();
            releaseInputFramesLocked(currentTs);
            return false;
        }

        while (!newInputAvailable) {
            compilePendingInputLocked();
            newInputAvailable = getNextReadyInputLocked(&currentTs);
            if (!newInputAvailable) {
                auto failingFrameNumber = getNextFailingInputLocked(&currentTs);
                if (failingFrameNumber >= 0) {
                    // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
                    // possible for two internal stream buffers to fail. In such scenario the
                    // composite stream should notify the client about a stream buffer error only
                    // once and this information is kept within 'errorNotified'.
                    // Any present failed input frames will be removed on a subsequent call to
                    // 'releaseInputFramesLocked()'.
                    releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
                    currentTs = INT64_MAX;
                }

                auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
                if (ret == TIMED_OUT) {
                    return true;
                } else if (ret != OK) {
                    ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
                            strerror(-ret), ret);
                    return false;
                }
            }
        }
    }

    auto res = processInputFrame(mPendingInputFrames[currentTs]);
    Mutex::Autolock l(mMutex);
    if (res != OK) {
        ALOGE("%s: Failed processing frame with timestamp: %" PRId64 ": %s (%d)", __FUNCTION__,
                currentTs, strerror(-res), res);
        mPendingInputFrames[currentTs].error = true;
    }

    releaseInputFramesLocked(currentTs);

    return true;
}

bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) {
    ANativeWindow *anw = surface.get();
    status_t err;
    int format;
    if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
        String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
                err);
        ALOGE("%s: %s", __FUNCTION__, msg.string());
        return false;
    }

    int dataspace;
    if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
        String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
                err);
        ALOGE("%s: %s", __FUNCTION__, msg.string());
        return false;
    }

    if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) {
        return true;
    }

    return false;
}

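// Creates the two internal streams backing the composite stream: a blob (jpeg)
// stream matching the client resolution and a DEPTH16 stream with the closest
// supported size, then registers this instance as listener for both so their
// buffers and capture results can be matched by timestamp.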
status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
        bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
        camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
        std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
    if (mSupportedDepthSizes.empty()) {
        ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    size_t depthWidth, depthHeight;
    auto ret = getMatchingDepthSize(width, height, mSupportedDepthSizes, &depthWidth,
            &depthHeight);
    if (ret != OK) {
        ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
        return ret;
    }

    sp<CameraDeviceBase> device = mDevice.promote();
    if (!device.get()) {
        ALOGE("%s: Invalid camera device!", __FUNCTION__);
        return NO_INIT;
    }

    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);
    mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mBlobConsumer->setFrameAvailableListener(this);
    mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
    mBlobSurface = new Surface(producer);

    ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
            id, physicalCameraId, surfaceIds);
    if (ret == OK) {
        mBlobStreamId = *id;
        mBlobSurfaceId = (*surfaceIds)[0];
        mOutputSurface = consumers[0];
    } else {
        return ret;
    }

    BufferQueue::createBufferQueue(&producer, &consumer);
    mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mDepthConsumer->setFrameAvailableListener(this);
    mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
    mDepthSurface = new Surface(producer);
    std::vector<int> depthSurfaceId;
    ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
            kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, &depthSurfaceId);
    if (ret == OK) {
        mDepthSurfaceId = depthSurfaceId[0];
    } else {
        return ret;
    }

    ret = registerCompositeStreamListener(getStreamId());
    if (ret != OK) {
        ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
        return ret;
    }

    ret = registerCompositeStreamListener(mDepthStreamId);
    if (ret != OK) {
        ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
        return ret;
    }

    mBlobWidth = width;
    mBlobHeight = height;

    return ret;
}

status_t DepthCompositeStream::configureStream() {
    if (isRunning()) {
        // Processing thread is already running, nothing more to do.
        return NO_ERROR;
    }

    if (mOutputSurface.get() == nullptr) {
        ALOGE("%s: No valid output surface set!", __FUNCTION__);
        return NO_INIT;
    }

    auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
                mBlobStreamId);
        return res;
    }

    int maxProducerBuffers;
    ANativeWindow *anw = mBlobSurface.get();
    if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    ANativeWindow *anwConsumer = mOutputSurface.get();
    int maxConsumerBuffers;
    if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
            &maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffer_count(
            anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    run("DepthCompositeStreamProc");

    return NO_ERROR;
}

status_t DepthCompositeStream::deleteInternalStreams() {
    // The 'CameraDeviceClient' parent will delete the blob stream
    requestExit();

    auto ret = join();
    if (ret != OK) {
        ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
                strerror(-ret), ret);
    }

    sp<CameraDeviceBase> device = mDevice.promote();
    if (!device.get()) {
        ALOGE("%s: Invalid camera device!", __FUNCTION__);
        return NO_INIT;
    }

    if (mDepthStreamId >= 0) {
        ret = device->deleteStream(mDepthStreamId);
        mDepthStreamId = -1;
    }

    return ret;
}

void DepthCompositeStream::onFrameAvailable(const BufferItem& item) {
    if (item.mDataSpace == kJpegDataSpace) {
        ALOGV("%s: Jpeg buffer with ts: %" PRId64 " ms. arrived!",
                __func__, ns2ms(item.mTimestamp));

        Mutex::Autolock l(mMutex);
        if (!mErrorState) {
            mInputJpegBuffers.push_back(item.mTimestamp);
            mInputReadyCondition.signal();
        }
    } else if (item.mDataSpace == kDepthMapDataSpace) {
        ALOGV("%s: Depth buffer with ts: %" PRId64 " ms. arrived!", __func__,
                ns2ms(item.mTimestamp));

        Mutex::Autolock l(mMutex);
        if (!mErrorState) {
            mInputDepthBuffers.push_back(item.mTimestamp);
            mInputReadyCondition.signal();
        }
    } else {
        ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
    }
}

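// Adds both internal streams (and their surface ids) to the capture request's
// surface map. The stream id reported back to the caller is the public blob
// stream id.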
status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
        Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
    if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) {
        (*outSurfaceMap)[mDepthStreamId] = std::vector<size_t>();
        outputStreamIds->push_back(mDepthStreamId);
    }
    (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId);

    if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
        (*outSurfaceMap)[mBlobStreamId] = std::vector<size_t>();
        outputStreamIds->push_back(mBlobStreamId);
    }
    (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);

    if (currentStreamId != nullptr) {
        *currentStreamId = mBlobStreamId;
    }

    return NO_ERROR;
}

void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
    // Processing can continue even in case of result errors.
    // At the moment depth composite stream processing relies mainly on static camera
    // characteristics data. The actual result data can be used for the jpeg quality but
    // in case it is absent we can default to maximum.
    eraseResult(resultExtras.frameNumber);
}

bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
    bool ret = false;
    // Buffer errors concerning internal composite streams should not be directly visible to
    // camera clients. They must only receive a single buffer error with the public composite
    // stream id.
    if ((resultExtras.errorStreamId == mDepthStreamId) ||
            (resultExtras.errorStreamId == mBlobStreamId)) {
        flagAnErrorFrameNumber(resultExtras.frameNumber);
        ret = true;
    }

    return ret;
}

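// Picks the depth stream size for a given blob size: an exact match wins,
// otherwise the largest supported depth size whose aspect ratio is within
// 'kDepthARTolerance' of the blob aspect ratio is chosen.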
status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height,
        const std::vector<std::tuple<size_t, size_t>>& supportedDepthSizes,
        size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) {
    if ((depthWidth == nullptr) || (depthHeight == nullptr)) {
        return BAD_VALUE;
    }

    float arTol = CameraProviderManager::kDepthARTolerance;
    *depthWidth = *depthHeight = 0;

    float aspectRatio = static_cast<float> (width) / static_cast<float> (height);
    for (const auto& it : supportedDepthSizes) {
        auto currentWidth = std::get<0>(it);
        auto currentHeight = std::get<1>(it);
        if ((currentWidth == width) && (currentHeight == height)) {
            *depthWidth = width;
            *depthHeight = height;
            break;
        } else {
            float currentRatio = static_cast<float> (currentWidth) /
                    static_cast<float> (currentHeight);
            auto currentSize = currentWidth * currentHeight;
            auto oldSize = (*depthWidth) * (*depthHeight);
            if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) {
                *depthWidth = currentWidth;
                *depthHeight = currentHeight;
            }
        }
    }

    return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
}

void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch,
        std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
    if (depthSizes == nullptr) {
        return;
    }

    auto entry = ch.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS);
    if (entry.count > 0) {
        // Depth stream configuration entries have four int32_t components
        // (pixelformat, width, height, type)
        size_t entryCount = entry.count / 4;
        depthSizes->reserve(entryCount);
        for (size_t i = 0; i < entry.count; i += 4) {
            if ((entry.data.i32[i] == kDepthMapPixelFormat) &&
                    (entry.data.i32[i+3] ==
                            ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
                depthSizes->push_back(std::make_tuple(entry.data.i32[i+1],
                        entry.data.i32[i+2]));
            }
        }
    }
}

status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
        const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
    if (compositeOutput == nullptr) {
        return BAD_VALUE;
    }

    std::vector<std::tuple<size_t, size_t>> depthSizes;
    getSupportedDepthSizes(ch, &depthSizes);
    if (depthSizes.empty()) {
        ALOGE("%s: No depth stream configurations present", __FUNCTION__);
        return BAD_VALUE;
    }

    size_t depthWidth, depthHeight;
    auto ret = getMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes, &depthWidth,
            &depthHeight);
    if (ret != OK) {
        ALOGE("%s: No matching depth stream size found", __FUNCTION__);
        return ret;
    }

    compositeOutput->clear();
    compositeOutput->insert(compositeOutput->end(), 2, streamInfo);

    // Jpeg/Blob stream info
    (*compositeOutput)[0].dataSpace = kJpegDataSpace;
    (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;

    // Depth stream info
    (*compositeOutput)[1].width = depthWidth;
    (*compositeOutput)[1].height = depthHeight;
    (*compositeOutput)[1].format = kDepthMapPixelFormat;
    (*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
    (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;

    return NO_ERROR;
}

std::unique_ptr<ImagingModel> DepthCompositeStream::getImagingModel() {
    // It is not possible to generate an imaging model without intrinsic calibration.
    if (mInstrinsicCalibration.size() != 5) {
        return nullptr;
    }

    // The camera intrinsic calibration layout is as follows:
    // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
    const dynamic_depth::Point<double> focalLength(mInstrinsicCalibration[0],
            mInstrinsicCalibration[1]);
    const Dimension imageSize(mBlobWidth, mBlobHeight);
    ImagingModelParams params(focalLength, imageSize);
    params.principal_point.x = mInstrinsicCalibration[2];
    params.principal_point.y = mInstrinsicCalibration[3];
    params.skew = mInstrinsicCalibration[4];

    // The camera lens distortion contains the following lens correction coefficients:
    // [kappa_1, kappa_2, kappa_3, kappa_4, kappa_5]
    if (mLensDistortion.size() == 5) {
        // According to the specification the lens distortion coefficients should be ordered
        // as [1, kappa_4, kappa_1, kappa_5, kappa_2, 0, kappa_3, 0]
        float distortionData[] = {1.f, mLensDistortion[3], mLensDistortion[0], mLensDistortion[4],
                mLensDistortion[1], 0.f, mLensDistortion[2], 0.f};
        auto distortionDataLength = sizeof(distortionData) / sizeof(distortionData[0]);
        params.distortion.reserve(distortionDataLength);
        params.distortion.insert(params.distortion.end(), distortionData,
                distortionData + distortionDataLength);
    }

    return ImagingModel::FromData(params);
}

} // namespace camera3
} // namespace android