/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Camera3-DepthCompositeStream"
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0

#include "api1/client2/JpegProcessor.h"
#include "common/CameraProviderManager.h"
#include "utils/SessionConfigurationUtils.h"
#include <gui/Surface.h>
#include <utils/Log.h>
#include <utils/Trace.h>

#include "DepthCompositeStream.h"

namespace android {
namespace camera3 {

DepthCompositeStream::DepthCompositeStream(sp<CameraDeviceBase> device,
        wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
        CompositeStream(device, cb),
        mBlobStreamId(-1),
        mBlobSurfaceId(-1),
        mDepthStreamId(-1),
        mDepthSurfaceId(-1),
        mBlobWidth(0),
        mBlobHeight(0),
        mDepthBufferAcquired(false),
        mBlobBufferAcquired(false),
        mProducerListener(new ProducerListener()),
        mMaxJpegBufferSize(-1),
        mUHRMaxJpegBufferSize(-1),
        mIsLogicalCamera(false) {
    if (device != nullptr) {
        CameraMetadata staticInfo = device->info();
        auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
        if (entry.count > 0) {
            mMaxJpegBufferSize = entry.data.i32[0];
        } else {
            ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
        }

        mUHRMaxJpegSize =
                SessionConfigurationUtils::getMaxJpegResolution(staticInfo,
                        /*ultraHighResolution*/true);
        mDefaultMaxJpegSize =
                SessionConfigurationUtils::getMaxJpegResolution(staticInfo,
                        /*isUltraHighResolution*/false);

        mUHRMaxJpegBufferSize =
                SessionConfigurationUtils::getUHRMaxJpegBufferSize(mUHRMaxJpegSize,
                        mDefaultMaxJpegSize, mMaxJpegBufferSize);

        entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
        if (entry.count == 5) {
            mIntrinsicCalibration.reserve(5);
            mIntrinsicCalibration.insert(mIntrinsicCalibration.end(), entry.data.f,
                    entry.data.f + 5);
        } else {
            ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
        }

        entry = staticInfo.find(ANDROID_LENS_DISTORTION);
        if (entry.count == 5) {
            mLensDistortion.reserve(5);
            mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
        } else {
            ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
        }

        entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
        for (size_t i = 0; i < entry.count; ++i) {
            uint8_t capability = entry.data.u8[i];
            if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
                mIsLogicalCamera = true;
                break;
            }
        }

        getSupportedDepthSizes(staticInfo, /*maxResolution*/false, &mSupportedDepthSizes);
        if (SessionConfigurationUtils::isUltraHighResolutionSensor(staticInfo)) {
            getSupportedDepthSizes(staticInfo, true, &mSupportedDepthSizesMaximumResolution);
        }
    }
}

DepthCompositeStream::~DepthCompositeStream() {
    mBlobConsumer.clear();
    mBlobSurface.clear();
    mBlobStreamId = -1;
    mBlobSurfaceId = -1;
    mDepthConsumer.clear();
    mDepthSurface.clear();
    mDepthConsumer = nullptr;
    mDepthSurface = nullptr;
}

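// Collects newly available jpeg and depth buffers from the internal consumers, pairs them
// with capture results and frame numbers in 'mPendingInputFrames' (keyed by sensor
// timestamp), and flags entries whose frame numbers were reported as failed.
// Must be called with 'mMutex' held.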
void DepthCompositeStream::compilePendingInputLocked() {
    CpuConsumer::LockedBuffer imgBuffer;

    while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
        auto it = mInputJpegBuffers.begin();
        auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputJpegBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mBlobConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
            mBlobBufferAcquired = true;
        }
        mInputJpegBuffers.erase(it);
    }

    while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
        auto it = mInputDepthBuffers.begin();
        auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputDepthBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting depth buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mDepthConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
            mDepthBufferAcquired = true;
        }
        mInputDepthBuffers.erase(it);
    }

    while (!mCaptureResults.empty()) {
        auto it = mCaptureResults.begin();
        // Negative timestamp indicates that something went wrong during the capture result
        // collection process.
        if (it->first >= 0) {
            mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
            mPendingInputFrames[it->first].result = std::get<1>(it->second);
        }
        mCaptureResults.erase(it);
    }

    while (!mFrameNumberMap.empty()) {
        auto it = mFrameNumberMap.begin();
        mPendingInputFrames[it->second].frameNumber = it->first;
        mFrameNumberMap.erase(it);
    }

    auto it = mErrorFrameNumbers.begin();
    while (it != mErrorFrameNumbers.end()) {
        bool frameFound = false;
        for (auto &inputFrame : mPendingInputFrames) {
            if (inputFrame.second.frameNumber == *it) {
                inputFrame.second.error = true;
                frameFound = true;
                break;
            }
        }

        if (frameFound) {
            it = mErrorFrameNumbers.erase(it);
        } else {
            ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
                    *it);
            it++;
        }
    }
}

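// Scans the pending input frames for the oldest entry (timestamp smaller than '*currentTs')
// that has both a jpeg and a depth buffer and no error, returning its timestamp through
// 'currentTs'.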
bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) {
    if (currentTs == nullptr) {
        return false;
    }

    bool newInputAvailable = false;
    for (const auto& it : mPendingInputFrames) {
        if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) &&
                (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) {
            *currentTs = it.first;
            newInputAvailable = true;
        }
    }

    return newInputAvailable;
}

int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) {
    int64_t ret = -1;
    if (currentTs == nullptr) {
        return ret;
    }

    for (const auto& it : mPendingInputFrames) {
        if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
            *currentTs = it.first;
            ret = it.second.frameNumber;
        }
    }

    return ret;
}

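// Composes one depth photo: finds the main jpeg size, derives the worst-case output buffer
// size, dequeues and locks an output buffer, fills a DepthPhotoInputFrame with the jpeg and
// depth buffers plus calibration and orientation metadata, runs the depth photo processor,
// and appends the jpeg blob trailer before queueing the result to the client surface.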
status_t DepthCompositeStream::processInputFrame(nsecs_t ts, const InputFrame &inputFrame) {
    status_t res;
    sp<ANativeWindow> outputANW = mOutputSurface;
    ANativeWindowBuffer *anb;
    int fenceFd;
    void *dstBuffer;

    auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
            inputFrame.jpegBuffer.width);
    if (jpegSize == 0) {
        ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
        jpegSize = inputFrame.jpegBuffer.width;
    }

    size_t maxDepthJpegBufferSize = 0;
    if (mMaxJpegBufferSize > 0) {
        // If this is an ultra high resolution sensor and the input frame is larger than the
        // default resolution jpeg, use the ultra high resolution max jpeg buffer size.
        if (mUHRMaxJpegSize.width != 0 &&
                inputFrame.jpegBuffer.width * inputFrame.jpegBuffer.height >
                mDefaultMaxJpegSize.width * mDefaultMaxJpegSize.height) {
            maxDepthJpegBufferSize = mUHRMaxJpegBufferSize;
        } else {
            maxDepthJpegBufferSize = mMaxJpegBufferSize;
        }
    } else {
        maxDepthJpegBufferSize = std::max<size_t> (jpegSize,
                inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
    }

    uint8_t jpegQuality = 100;
    auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
    if (entry.count > 0) {
        jpegQuality = entry.data.u8[0];
    }

    // The final depth photo will consist of the main jpeg buffer, the depth map buffer (also in
    // jpeg format) and the confidence map (jpeg as well). Assume the worst case where all three
    // jpegs need the maximum jpeg buffer size.
    size_t finalJpegBufferSize = maxDepthJpegBufferSize * 3;

    if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegBufferSize, 1))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer dimensions"
                " %zux%u for stream %d", __FUNCTION__, finalJpegBufferSize, 1U, mBlobStreamId);
        return res;
    }

    res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
    if (res != OK) {
        ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
                res);
        return res;
    }

    sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
    res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
    if (res != OK) {
        ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
                strerror(-res), res);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return res;
    }

    if ((gb->getWidth() < finalJpegBufferSize) || (gb->getHeight() != 1)) {
        ALOGE("%s: Blob buffer size mismatch, expected %dx%d received %zux%u", __FUNCTION__,
                gb->getWidth(), gb->getHeight(), finalJpegBufferSize, 1U);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return BAD_VALUE;
    }

    DepthPhotoInputFrame depthPhoto;
    depthPhoto.mMainJpegBuffer = reinterpret_cast<const char*> (inputFrame.jpegBuffer.data);
    depthPhoto.mMainJpegWidth = mBlobWidth;
    depthPhoto.mMainJpegHeight = mBlobHeight;
    depthPhoto.mMainJpegSize = jpegSize;
    depthPhoto.mDepthMapBuffer = reinterpret_cast<uint16_t*> (inputFrame.depthBuffer.data);
    depthPhoto.mDepthMapWidth = inputFrame.depthBuffer.width;
    depthPhoto.mDepthMapHeight = inputFrame.depthBuffer.height;
    depthPhoto.mDepthMapStride = inputFrame.depthBuffer.stride;
    depthPhoto.mJpegQuality = jpegQuality;
    depthPhoto.mIsLogical = mIsLogicalCamera;
    depthPhoto.mMaxJpegSize = maxDepthJpegBufferSize;
    // The camera intrinsic calibration layout is as follows:
    // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
    if (mIntrinsicCalibration.size() == 5) {
        memcpy(depthPhoto.mIntrinsicCalibration, mIntrinsicCalibration.data(),
                sizeof(depthPhoto.mIntrinsicCalibration));
        depthPhoto.mIsIntrinsicCalibrationValid = 1;
    } else {
        depthPhoto.mIsIntrinsicCalibrationValid = 0;
    }
    // The camera lens distortion contains the following lens correction coefficients.
    // [kappa_1, kappa_2, kappa_3, kappa_4, kappa_5]
    if (mLensDistortion.size() == 5) {
        memcpy(depthPhoto.mLensDistortion, mLensDistortion.data(),
                sizeof(depthPhoto.mLensDistortion));
        depthPhoto.mIsLensDistortionValid = 1;
    } else {
        depthPhoto.mIsLensDistortionValid = 0;
    }
    entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
    if (entry.count > 0) {
        // The camera jpeg orientation value must be one of {0, 90, 180, 270} degrees.
        switch (entry.data.i32[0]) {
            case 0:
            case 90:
            case 180:
            case 270:
                depthPhoto.mOrientation = static_cast<DepthPhotoOrientation> (entry.data.i32[0]);
                break;
            default:
                ALOGE("%s: Unexpected jpeg orientation value: %d, default to 0 degrees",
                        __FUNCTION__, entry.data.i32[0]);
        }
    }

    size_t actualJpegSize = 0;
    res = processDepthPhotoFrame(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
    if (res != 0) {
        ALOGE("%s: Depth photo processing failed: %s (%d)", __FUNCTION__, strerror(-res), res);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return res;
    }

    size_t finalJpegSize = actualJpegSize + sizeof(struct camera_jpeg_blob);
    if (finalJpegSize > finalJpegBufferSize) {
        ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return NO_MEMORY;
    }

    res = native_window_set_buffers_timestamp(mOutputSurface.get(), ts);
    if (res != OK) {
        ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)", __FUNCTION__,
                getStreamId(), strerror(-res), res);
        return res;
    }

    ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
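    // Per the BLOB stream convention, the camera_jpeg_blob trailer is written at the very end
    // of the allocated buffer so that consumers can recover the actual jpeg size from the
    // trailer rather than from the buffer dimensions.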
    uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
            (gb->getWidth() - sizeof(struct camera_jpeg_blob));
    struct camera_jpeg_blob *blob = reinterpret_cast<struct camera_jpeg_blob*> (header);
    blob->jpeg_blob_id = CAMERA_JPEG_BLOB_ID;
    blob->jpeg_size = actualJpegSize;
    outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);

    return res;
}

void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
    if (inputFrame == nullptr) {
        return;
    }

    if (inputFrame->depthBuffer.data != nullptr) {
        mDepthConsumer->unlockBuffer(inputFrame->depthBuffer);
        inputFrame->depthBuffer.data = nullptr;
        mDepthBufferAcquired = false;
    }

    if (inputFrame->jpegBuffer.data != nullptr) {
        mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
        inputFrame->jpegBuffer.data = nullptr;
        mBlobBufferAcquired = false;
    }

    if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
        //TODO: Figure out correct requestId
        notifyError(inputFrame->frameNumber, -1 /*requestId*/);
        inputFrame->errorNotified = true;
    }
}

void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
    auto it = mPendingInputFrames.begin();
    while (it != mPendingInputFrames.end()) {
        if (it->first <= currentTs) {
            releaseInputFrameLocked(&it->second);
            it = mPendingInputFrames.erase(it);
        } else {
            it++;
        }
    }
}

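// Main processing loop: compiles pending inputs, waits until a frame has both buffers
// available, composes it via processInputFrame() and then releases all processed or failed
// frames up to that timestamp.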
bool DepthCompositeStream::threadLoop() {
    int64_t currentTs = INT64_MAX;
    bool newInputAvailable = false;

    {
        Mutex::Autolock l(mMutex);

        if (mErrorState) {
            // In case we landed in error state, return any pending buffers and
            // halt all further processing.
            compilePendingInputLocked();
            releaseInputFramesLocked(currentTs);
            return false;
        }

        while (!newInputAvailable) {
            compilePendingInputLocked();
            newInputAvailable = getNextReadyInputLocked(&currentTs);
            if (!newInputAvailable) {
                auto failingFrameNumber = getNextFailingInputLocked(&currentTs);
                if (failingFrameNumber >= 0) {
                    // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
                    // possible for two internal stream buffers to fail. In such scenario the
                    // composite stream should notify the client about a stream buffer error only
                    // once and this information is kept within 'errorNotified'.
                    // Any present failed input frames will be removed on a subsequent call to
                    // 'releaseInputFramesLocked()'.
                    releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
                    currentTs = INT64_MAX;
                }

                auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
                if (ret == TIMED_OUT) {
                    return true;
                } else if (ret != OK) {
                    ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
                            strerror(-ret), ret);
                    return false;
                }
            }
        }
    }

    auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]);
    Mutex::Autolock l(mMutex);
    if (res != OK) {
        ALOGE("%s: Failed processing frame with timestamp: %" PRId64 ": %s (%d)", __FUNCTION__,
                currentTs, strerror(-res), res);
        mPendingInputFrames[currentTs].error = true;
    }

    releaseInputFramesLocked(currentTs);

    return true;
}

bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) {
    ANativeWindow *anw = surface.get();
    status_t err;
    int format;
    if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
        String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
                err);
        ALOGE("%s: %s", __FUNCTION__, msg.string());
        return false;
    }

    int dataspace;
    if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
        String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
                err);
        ALOGE("%s: %s", __FUNCTION__, msg.string());
        return false;
    }

    if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) {
        return true;
    }

    return false;
}

static bool setContains(std::unordered_set<int32_t> containerSet, int32_t value) {
    return containerSet.find(value) != containerSet.end();
}

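// Selects a depth map size matching the requested blob size for each sensor pixel mode in
// 'sensorPixelModesUsed'. When both default and maximum-resolution modes are requested, the
// sizes chosen for the two modes must agree.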
status_t DepthCompositeStream::checkAndGetMatchingDepthSize(size_t width, size_t height,
        const std::vector<std::tuple<size_t, size_t>> &depthSizes,
        const std::vector<std::tuple<size_t, size_t>> &depthSizesMaximumResolution,
        const std::unordered_set<int32_t> &sensorPixelModesUsed,
        size_t *depthWidth, size_t *depthHeight) {
    if (depthWidth == nullptr || depthHeight == nullptr) {
        return BAD_VALUE;
    }
    size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
    bool hasDefaultSensorPixelMode =
            setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_DEFAULT);

    bool hasMaximumResolutionSensorPixelMode =
            setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION);

    if (!hasDefaultSensorPixelMode && !hasMaximumResolutionSensorPixelMode) {
        ALOGE("%s: sensor pixel modes don't contain either maximum resolution or default modes",
                __FUNCTION__);
        return BAD_VALUE;
    }

    if (hasDefaultSensorPixelMode) {
        auto ret = getMatchingDepthSize(width, height, depthSizes, &chosenDepthWidth,
                &chosenDepthHeight);
        if (ret != OK) {
            ALOGE("%s: No matching depth stream size found", __FUNCTION__);
            return ret;
        }
    }

    if (hasMaximumResolutionSensorPixelMode) {
        size_t depthWidth = 0, depthHeight = 0;
        auto ret = getMatchingDepthSize(width, height,
                depthSizesMaximumResolution, &depthWidth, &depthHeight);
        if (ret != OK) {
            ALOGE("%s: No matching max resolution depth stream size found", __FUNCTION__);
            return ret;
        }
        // Both matching depth sizes should be the same.
        if (chosenDepthWidth != 0 && chosenDepthWidth != depthWidth &&
                chosenDepthHeight != depthHeight) {
            ALOGE("%s: Maximum resolution sensor pixel mode and default sensor pixel mode don't"
                    " have matching depth sizes", __FUNCTION__);
            return BAD_VALUE;
        }
        if (chosenDepthWidth == 0) {
            chosenDepthWidth = depthWidth;
            chosenDepthHeight = depthHeight;
        }
    }
    *depthWidth = chosenDepthWidth;
    *depthHeight = chosenDepthHeight;
    return OK;
}


status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
        bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
        camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
        const std::unordered_set<int32_t> &sensorPixelModesUsed,
        std::vector<int> *surfaceIds,
        int /*streamSetId*/, bool /*isShared*/) {
    if (mSupportedDepthSizes.empty()) {
        ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    size_t depthWidth, depthHeight;
    auto ret =
            checkAndGetMatchingDepthSize(width, height, mSupportedDepthSizes,
                    mSupportedDepthSizesMaximumResolution, sensorPixelModesUsed, &depthWidth,
                    &depthHeight);
    if (ret != OK) {
        ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
        return ret;
    }

    sp<CameraDeviceBase> device = mDevice.promote();
    if (!device.get()) {
        ALOGE("%s: Invalid camera device!", __FUNCTION__);
        return NO_INIT;
    }

    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);
    mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
    mBlobConsumer->setFrameAvailableListener(this);
    mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
    mBlobSurface = new Surface(producer);

    ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
            id, physicalCameraId, sensorPixelModesUsed, surfaceIds);
    if (ret == OK) {
        mBlobStreamId = *id;
        mBlobSurfaceId = (*surfaceIds)[0];
        mOutputSurface = consumers[0];
    } else {
        return ret;
    }

    BufferQueue::createBufferQueue(&producer, &consumer);
    mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mDepthConsumer->setFrameAvailableListener(this);
    mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
    mDepthSurface = new Surface(producer);
    std::vector<int> depthSurfaceId;
    ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
            kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, sensorPixelModesUsed,
            &depthSurfaceId);
    if (ret == OK) {
        mDepthSurfaceId = depthSurfaceId[0];
    } else {
        return ret;
    }

    ret = registerCompositeStreamListener(getStreamId());
    if (ret != OK) {
        ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
        return ret;
    }

    ret = registerCompositeStreamListener(mDepthStreamId);
    if (ret != OK) {
        ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
        return ret;
    }

    mBlobWidth = width;
    mBlobHeight = height;

    return ret;
}

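// Connects the client-facing output surface and sizes its buffer queue to cover both the
// internal producer and the consumer before starting the processing thread.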
status_t DepthCompositeStream::configureStream() {
    if (isRunning()) {
        // Processing thread is already running, nothing more to do.
        return NO_ERROR;
    }

    if (mOutputSurface.get() == nullptr) {
        ALOGE("%s: No valid output surface set!", __FUNCTION__);
        return NO_INIT;
    }

    auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
                mBlobStreamId);
        return res;
    }

    int maxProducerBuffers;
    ANativeWindow *anw = mBlobSurface.get();
    if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    ANativeWindow *anwConsumer = mOutputSurface.get();
    int maxConsumerBuffers;
    if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
            &maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffer_count(
            anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    run("DepthCompositeStreamProc");

    return NO_ERROR;
}

status_t DepthCompositeStream::deleteInternalStreams() {
    // The 'CameraDeviceClient' parent will delete the blob stream
    requestExit();

    auto ret = join();
    if (ret != OK) {
        ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
                strerror(-ret), ret);
    }

    if (mDepthStreamId >= 0) {
        // Camera devices may not be valid after switching to offline mode.
        // In this case, all offline streams including internal composite streams
        // are managed and released by the offline session.
        sp<CameraDeviceBase> device = mDevice.promote();
        if (device.get() != nullptr) {
            ret = device->deleteStream(mDepthStreamId);
        }

        mDepthStreamId = -1;
    }

    if (mOutputSurface != nullptr) {
        mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
        mOutputSurface.clear();
    }

    return ret;
}

void DepthCompositeStream::onFrameAvailable(const BufferItem& item) {
    if (item.mDataSpace == kJpegDataSpace) {
        ALOGV("%s: Jpeg buffer with ts: %" PRId64 " ms. arrived!",
                __func__, ns2ms(item.mTimestamp));

        Mutex::Autolock l(mMutex);
        if (!mErrorState) {
            mInputJpegBuffers.push_back(item.mTimestamp);
            mInputReadyCondition.signal();
        }
    } else if (item.mDataSpace == kDepthMapDataSpace) {
        ALOGV("%s: Depth buffer with ts: %" PRId64 " ms. arrived!", __func__,
                ns2ms(item.mTimestamp));

        Mutex::Autolock l(mMutex);
        if (!mErrorState) {
            mInputDepthBuffers.push_back(item.mTimestamp);
            mInputReadyCondition.signal();
        }
    } else {
        ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
    }
}

status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
        Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
    if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) {
        outputStreamIds->push_back(mDepthStreamId);
    }
    (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId);

    if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
        outputStreamIds->push_back(mBlobStreamId);
    }
    (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);

    if (currentStreamId != nullptr) {
        *currentStreamId = mBlobStreamId;
    }

    return NO_ERROR;
}

status_t DepthCompositeStream::insertCompositeStreamIds(
        std::vector<int32_t>* compositeStreamIds /*out*/) {
    if (compositeStreamIds == nullptr) {
        return BAD_VALUE;
    }

    compositeStreamIds->push_back(mDepthStreamId);
    compositeStreamIds->push_back(mBlobStreamId);

    return OK;
}

void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
    // Processing can continue even in case of result errors.
    // At the moment depth composite stream processing relies mainly on static camera
    // characteristics data. The actual result data can be used for the jpeg quality but
    // in case it is absent we can default to maximum.
    eraseResult(resultExtras.frameNumber);
}

bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
    bool ret = false;
    // Buffer errors concerning internal composite streams should not be directly visible to
    // camera clients. They must only receive a single buffer error with the public composite
    // stream id.
    if ((resultExtras.errorStreamId == mDepthStreamId) ||
            (resultExtras.errorStreamId == mBlobStreamId)) {
        flagAnErrorFrameNumber(resultExtras.frameNumber);
        ret = true;
    }

    return ret;
}

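// Picks the depth size matching the requested blob dimensions: an exact match wins;
// otherwise the largest supported size whose aspect ratio is within
// CameraProviderManager::kDepthARTolerance of the requested one is chosen.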
status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height,
        const std::vector<std::tuple<size_t, size_t>>& supportedDepthSizes,
        size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) {
    if ((depthWidth == nullptr) || (depthHeight == nullptr)) {
        return BAD_VALUE;
    }

    float arTol = CameraProviderManager::kDepthARTolerance;
    *depthWidth = *depthHeight = 0;

    float aspectRatio = static_cast<float> (width) / static_cast<float> (height);
    for (const auto& it : supportedDepthSizes) {
        auto currentWidth = std::get<0>(it);
        auto currentHeight = std::get<1>(it);
        if ((currentWidth == width) && (currentHeight == height)) {
            *depthWidth = width;
            *depthHeight = height;
            break;
        } else {
            float currentRatio = static_cast<float> (currentWidth) /
                    static_cast<float> (currentHeight);
            auto currentSize = currentWidth * currentHeight;
            auto oldSize = (*depthWidth) * (*depthHeight);
            if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) {
                *depthWidth = currentWidth;
                *depthHeight = currentHeight;
            }
        }
    }

    return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
}

void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch, bool maxResolution,
        std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
    if (depthSizes == nullptr) {
        return;
    }

    auto entry = ch.find(
            camera3::SessionConfigurationUtils::getAppropriateModeTag(
                    ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, maxResolution));
    if (entry.count > 0) {
        // Depth stream dimensions have four int32_t components
        // (pixelformat, width, height, type)
        size_t entryCount = entry.count / 4;
        depthSizes->reserve(entryCount);
        for (size_t i = 0; i < entry.count; i += 4) {
            if ((entry.data.i32[i] == kDepthMapPixelFormat) &&
                    (entry.data.i32[i+3] ==
                            ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
                depthSizes->push_back(std::make_tuple(entry.data.i32[i+1],
                        entry.data.i32[i+2]));
            }
        }
    }
}

status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
        const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
    if (compositeOutput == nullptr) {
        return BAD_VALUE;
    }

    std::vector<std::tuple<size_t, size_t>> depthSizes;
    std::vector<std::tuple<size_t, size_t>> depthSizesMaximumResolution;
    getSupportedDepthSizes(ch, /*maxResolution*/false, &depthSizes);
    if (depthSizes.empty()) {
        ALOGE("%s: No depth stream configurations present", __FUNCTION__);
        return BAD_VALUE;
    }

    if (SessionConfigurationUtils::isUltraHighResolutionSensor(ch)) {
        getSupportedDepthSizes(ch, /*maxResolution*/true, &depthSizesMaximumResolution);
        if (depthSizesMaximumResolution.empty()) {
            ALOGE("%s: No depth stream configurations for maximum resolution present",
                    __FUNCTION__);
            return BAD_VALUE;
        }
    }

    size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
    auto ret = checkAndGetMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes,
            depthSizesMaximumResolution, streamInfo.sensorPixelModesUsed, &chosenDepthWidth,
            &chosenDepthHeight);

    if (ret != OK) {
        ALOGE("%s: Couldn't get matching depth sizes", __FUNCTION__);
        return ret;
    }

    compositeOutput->clear();
    compositeOutput->insert(compositeOutput->end(), 2, streamInfo);

    // Sensor pixel modes should stay the same here. They're already overridden.
    // Jpeg/Blob stream info
    (*compositeOutput)[0].dataSpace = kJpegDataSpace;
    (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;

    // Depth stream info
    (*compositeOutput)[1].width = chosenDepthWidth;
    (*compositeOutput)[1].height = chosenDepthHeight;
    (*compositeOutput)[1].format = kDepthMapPixelFormat;
    (*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
    (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;

    return NO_ERROR;
}

}; // namespace camera3
}; // namespace android