Merge "AudioFlinger: Fix VolumeShaper initialization for DirectOutputThread"
diff --git a/apex/Android.bp b/apex/Android.bp
index 39997d2..c077a77 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -16,21 +16,31 @@
name: "com.android.media",
manifest: "manifest.json",
java_libs: ["updatable-media"],
- native_shared_libs: [
- // Extractor plugins
- "libaacextractor",
- "libamrextractor",
- "libflacextractor",
- "libmidiextractor",
- "libmkvextractor",
- "libmp3extractor",
- "libmp4extractor",
- "libmpeg2extractor",
- "liboggextractor",
- "libwavextractor",
- // MediaPlayer2
- "libmedia2_jni",
- ],
+ compile_multilib: "both",
+ multilib: {
+ first: {
+ // Extractor process runs only with the primary ABI.
+ native_shared_libs: [
+ // Extractor plugins
+ "libaacextractor",
+ "libamrextractor",
+ "libflacextractor",
+ "libmidiextractor",
+ "libmkvextractor",
+ "libmp3extractor",
+ "libmp4extractor",
+ "libmpeg2extractor",
+ "liboggextractor",
+ "libwavextractor",
+ ],
+ },
+ both: {
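+        // MediaPlayer2 JNI is loaded by app processes of either ABI, so build it for both.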
+ native_shared_libs: [
+ // MediaPlayer2
+ "libmedia2_jni",
+ ],
+ },
+ },
key: "com.android.media.key",
}
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 641816f..c1efa5f 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -5549,6 +5549,73 @@
ACAMERA_DEPTH_AVAILABLE_RECOMMENDED_DEPTH_STREAM_CONFIGURATIONS =
// int32[n*5]
ACAMERA_DEPTH_START + 5,
+ /**
+ * <p>The available dynamic depth dataspace stream
+ * configurations that this camera device supports
+ * (i.e. format, width, height, output/input stream).</p>
+ *
+ * <p>Type: int32[n*4] (acamera_metadata_enum_android_depth_available_dynamic_depth_stream_configurations_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>These are output stream configurations for use with
+ * dataSpace DYNAMIC_DEPTH. The configurations are
+ * listed as <code>(format, width, height, input?)</code> tuples.</p>
+ * <p>Only devices that support depth output for at least
+ * the HAL_PIXEL_FORMAT_Y16 dense depth map, along with
+ * HAL_PIXEL_FORMAT_BLOB of the same size (or of a size with
+ * the same aspect ratio), can list dynamic depth dataspace
+ * stream configurations. ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE also
+ * needs to be set to FALSE.</p>
+ *
+ * @see ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE
+ */
+ ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS =
+ // int32[n*4] (acamera_metadata_enum_android_depth_available_dynamic_depth_stream_configurations_t)
+ ACAMERA_DEPTH_START + 6,
+ /**
+ * <p>This lists the minimum frame duration for each
+ * format/size combination for dynamic depth output streams.</p>
+ *
+ * <p>Type: int64[4*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>This should correspond to the frame duration when only that
+ * stream is active, with all processing (typically in android.*.mode)
+ * set to either OFF or FAST.</p>
+ * <p>When multiple streams are used in a request, the minimum frame
+ * duration will be max(individual stream min durations).</p>
+ * <p>The minimum frame duration of a stream (of a particular format, size)
+ * is the same regardless of whether the stream is input or output.</p>
+ */
+ ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS = // int64[4*n]
+ ACAMERA_DEPTH_START + 7,
+ /**
+ * <p>This lists the maximum stall duration for each
+ * output format/size combination for dynamic depth streams.</p>
+ *
+ * <p>Type: int64[4*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>A stall duration is how much extra time would get added
+ * to the normal minimum frame duration for a repeating request
+ * that has streams with non-zero stall.</p>
+ * <p>All dynamic depth output streams may have a nonzero stall
+ * duration.</p>
+ */
+ ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS = // int64[4*n]
+ ACAMERA_DEPTH_START + 8,
ACAMERA_DEPTH_END,
/**
@@ -8246,6 +8313,16 @@
} acamera_metadata_enum_android_depth_depth_is_exclusive_t;
+// ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS
+typedef enum acamera_metadata_enum_acamera_depth_available_dynamic_depth_stream_configurations {
+ ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_OUTPUT
+ = 0,
+
+ ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_INPUT
+ = 1,
+
+} acamera_metadata_enum_android_depth_available_dynamic_depth_stream_configurations_t;
+
// ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE
typedef enum acamera_metadata_enum_acamera_logical_multi_camera_sensor_sync_type {
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index 87730ae..d1bdf0d 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -151,6 +151,7 @@
mNumBytesPerInputFrame(0u),
mOutBufferSize(0u),
mSentCodecSpecificData(false),
+ mInputTimeSet(false),
mInputSize(0),
mInputTimeUs(-1ll),
mSignalledError(false),
@@ -176,6 +177,7 @@
c2_status_t C2SoftAacEnc::onStop() {
mSentCodecSpecificData = false;
+ mInputTimeSet = false;
mInputSize = 0u;
mInputTimeUs = -1ll;
mSignalledError = false;
@@ -193,6 +195,7 @@
c2_status_t C2SoftAacEnc::onFlush_sm() {
mSentCodecSpecificData = false;
+ mInputTimeSet = false;
mInputSize = 0u;
return C2_OK;
}
@@ -337,7 +340,6 @@
mOutBufferSize = encInfo.maxOutBufBytes;
mNumBytesPerInputFrame = encInfo.frameLength * channelCount * sizeof(int16_t);
- mInputTimeUs = work->input.ordinal.timestamp;
mSentCodecSpecificData = true;
}
@@ -351,6 +353,10 @@
data = view.data();
capacity = view.capacity();
}
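+    // Anchor the output timestamp on the first work item that actually carries
+    // input data, so CSD-only or empty inputs do not skew encoded timestamps.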
+ if (!mInputTimeSet && capacity > 0) {
+ mInputTimeUs = work->input.ordinal.timestamp;
+ mInputTimeSet = true;
+ }
size_t numFrames = (capacity + mInputSize + (eos ? mNumBytesPerInputFrame - 1 : 0))
/ mNumBytesPerInputFrame;
@@ -550,6 +556,7 @@
(void)pool;
mSentCodecSpecificData = false;
+ mInputTimeSet = false;
mInputSize = 0u;
// TODO: we don't have any pending work at this time to drain.
diff --git a/media/codec2/components/aac/C2SoftAacEnc.h b/media/codec2/components/aac/C2SoftAacEnc.h
index 82fb438..779365b 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.h
+++ b/media/codec2/components/aac/C2SoftAacEnc.h
@@ -57,6 +57,7 @@
UINT mOutBufferSize;
bool mSentCodecSpecificData;
+ bool mInputTimeSet;
size_t mInputSize;
c2_cntr64_t mInputTimeUs;
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
index ca21480..8c03257 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
@@ -271,7 +271,7 @@
mFilledLen = 0;
}
ALOGV("causal sample size %d", mFilledLen);
- if (mIsFirst) {
+ if (mIsFirst && outPos != 0) {
mIsFirst = false;
mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
}
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
index be3892f..074493c 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
@@ -347,7 +347,7 @@
mFilledLen = 0;
}
ALOGV("causal sample size %d", mFilledLen);
- if (mIsFirst) {
+ if (mIsFirst && outPos != 0) {
mIsFirst = false;
mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
}
diff --git a/media/codec2/components/opus/C2SoftOpusEnc.cpp b/media/codec2/components/opus/C2SoftOpusEnc.cpp
index d6ed5ff..68fcea1 100644
--- a/media/codec2/components/opus/C2SoftOpusEnc.cpp
+++ b/media/codec2/components/opus/C2SoftOpusEnc.cpp
@@ -350,7 +350,7 @@
return;
}
}
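+    // Anchor the timestamp on the first work item that carries actual input samples.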
- if (mIsFirstFrame) {
+ if (mIsFirstFrame && inSize > 0) {
mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
mIsFirstFrame = false;
}
diff --git a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
index ba7c2d6..8b1ece3 100644
--- a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
+++ b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
@@ -168,7 +168,7 @@
std::shared_ptr<C2GraphicAllocation> alloc;
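+    // WrapNativeCodec2GrallocHandle now clones the handle internally, so no
+    // caller-side native_handle_clone() is needed.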
C2Handle* handle = WrapNativeCodec2GrallocHandle(
- native_handle_clone(buffer->handle),
+ buffer->handle,
buffer->width, buffer->height,
buffer->format, buffer->usage, buffer->stride);
mAllocatorMutex.lock();
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index 9500aed..03d859a 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -225,18 +225,14 @@
if (omxBuf.mBufferType == OMXBuffer::kBufferTypeANWBuffer
&& omxBuf.mGraphicBuffer != nullptr) {
std::shared_ptr<C2GraphicAllocation> alloc;
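+        // WrapNativeCodec2GrallocHandle now clones the handle internally, making
+        // the local clone and its explicit cleanup unnecessary.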
- native_handle_t *clonedHandle = native_handle_clone(omxBuf.mGraphicBuffer->handle);
handle = WrapNativeCodec2GrallocHandle(
- clonedHandle,
+ omxBuf.mGraphicBuffer->handle,
omxBuf.mGraphicBuffer->width,
omxBuf.mGraphicBuffer->height,
omxBuf.mGraphicBuffer->format,
omxBuf.mGraphicBuffer->usage,
omxBuf.mGraphicBuffer->stride);
c2_status_t err = mAllocator->priorGraphicAllocation(handle, &alloc);
- if (clonedHandle) {
- native_handle_delete(clonedHandle);
- }
if (err != OK) {
return UNKNOWN_ERROR;
}
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 1113ae8..597e8f3 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -578,7 +578,7 @@
ALOGV("VideoNativeMetadata: %dx%d", buffer->width, buffer->height);
C2Handle *handle = WrapNativeCodec2GrallocHandle(
- native_handle_clone(buffer->handle),
+ buffer->handle,
buffer->width,
buffer->height,
buffer->format,
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index 18f2430..e698bf4 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -159,7 +159,7 @@
return xd != nullptr && xd->magic == MAGIC;
}
- static C2HandleGralloc* WrapNativeHandle(
+ static C2HandleGralloc* WrapAndMoveNativeHandle(
const native_handle_t *const handle,
uint32_t width, uint32_t height, uint32_t format, uint64_t usage,
uint32_t stride, uint32_t generation, uint64_t igbp_id = 0, uint32_t igbp_slot = 0) {
@@ -181,6 +181,26 @@
return reinterpret_cast<C2HandleGralloc *>(res);
}
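+    // Unlike WrapAndMoveNativeHandle above, this variant clones the input handle
+    // first, so ownership of the original handle and its fds stays with the caller.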
+ static C2HandleGralloc* WrapNativeHandle(
+ const native_handle_t *const handle,
+ uint32_t width, uint32_t height, uint32_t format, uint64_t usage,
+ uint32_t stride, uint32_t generation, uint64_t igbp_id = 0, uint32_t igbp_slot = 0) {
+ if (handle == nullptr) {
+ return nullptr;
+ }
+ native_handle_t *clone = native_handle_clone(handle);
+ if (clone == nullptr) {
+ return nullptr;
+ }
+ C2HandleGralloc *res = WrapAndMoveNativeHandle(
+ clone, width, height, format, usage, stride, generation, igbp_id, igbp_slot);
+ if (res == nullptr) {
+ native_handle_close(clone);
+ }
+ native_handle_delete(clone);
+ return res;
+ }
+
static native_handle_t* UnwrapNativeHandle(
const C2Handle *const handle) {
const ExtraData *xd = getExtraData(handle);
@@ -366,7 +386,7 @@
if (mHandle) {
mHandle->getIgbpData(&generation, &igbp_id, &igbp_slot);
}
- mLockedHandle = C2HandleGralloc::WrapNativeHandle(
+ mLockedHandle = C2HandleGralloc::WrapAndMoveNativeHandle(
mBuffer, mInfo.mapperInfo.width, mInfo.mapperInfo.height,
(uint32_t)mInfo.mapperInfo.format, mInfo.mapperInfo.usage, mInfo.stride,
generation, igbp_id, igbp_slot);
@@ -743,7 +763,7 @@
return;
}
info.stride = stride;
- buffer = std::move(buffers[0]);
+ buffer = buffers[0];
});
if (err != C2_OK) {
return err;
@@ -752,7 +772,7 @@
allocation->reset(new C2AllocationGralloc(
info, mMapper, buffer,
- C2HandleGralloc::WrapNativeHandle(
+ C2HandleGralloc::WrapAndMoveNativeHandle(
buffer.getNativeHandle(),
info.mapperInfo.width, info.mapperInfo.height,
(uint32_t)info.mapperInfo.format, info.mapperInfo.usage, info.stride,
diff --git a/media/codec2/vndk/platform/C2BqBuffer.cpp b/media/codec2/vndk/platform/C2BqBuffer.cpp
index 7a26035..6e71b98 100644
--- a/media/codec2/vndk/platform/C2BqBuffer.cpp
+++ b/media/codec2/vndk/platform/C2BqBuffer.cpp
@@ -269,36 +269,28 @@
}
}
if (slotBuffer) {
- native_handle_t *grallocHandle = native_handle_clone(slotBuffer->handle);
-
- if (grallocHandle) {
- ALOGV("buffer wraps %llu %d", (unsigned long long)mProducerId, slot);
- C2Handle *c2Handle = android::WrapNativeCodec2GrallocHandle(
- grallocHandle,
- slotBuffer->width,
- slotBuffer->height,
- slotBuffer->format,
- slotBuffer->usage,
- slotBuffer->stride,
- slotBuffer->getGenerationNumber(),
- mProducerId, slot);
- if (c2Handle) {
- // Moved everything to c2Handle.
- native_handle_delete(grallocHandle);
- std::shared_ptr<C2GraphicAllocation> alloc;
- c2_status_t err = mAllocator->priorGraphicAllocation(c2Handle, &alloc);
- if (err != C2_OK) {
- return err;
- }
- std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
- std::make_shared<C2BufferQueueBlockPoolData>(
- slotBuffer->getGenerationNumber(),
- mProducerId, slot, shared_from_this());
- *block = _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
- return C2_OK;
+ ALOGV("buffer wraps %llu %d", (unsigned long long)mProducerId, slot);
+ C2Handle *c2Handle = android::WrapNativeCodec2GrallocHandle(
+ slotBuffer->handle,
+ slotBuffer->width,
+ slotBuffer->height,
+ slotBuffer->format,
+ slotBuffer->usage,
+ slotBuffer->stride,
+ slotBuffer->getGenerationNumber(),
+ mProducerId, slot);
+ if (c2Handle) {
+ std::shared_ptr<C2GraphicAllocation> alloc;
+ c2_status_t err = mAllocator->priorGraphicAllocation(c2Handle, &alloc);
+ if (err != C2_OK) {
+ return err;
}
- native_handle_close(grallocHandle);
- native_handle_delete(grallocHandle);
+ std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
+ std::make_shared<C2BufferQueueBlockPoolData>(
+ slotBuffer->getGenerationNumber(),
+ mProducerId, slot, shared_from_this());
+ *block = _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
+ return C2_OK;
}
// Block was not created. call requestBuffer# again next time.
slotBuffer.clear();
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
old mode 100644
new mode 100755
index d0efddd..237f9c8
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -345,6 +345,9 @@
case FOURCC("av01"):
return MEDIA_MIMETYPE_VIDEO_AV1;
+ case FOURCC(".mp3"):
+ case 0x6D730055: // "ms U" mp3 audio
+ return MEDIA_MIMETYPE_AUDIO_MPEG;
default:
ALOGW("Unknown fourcc: %c%c%c%c",
(fourcc >> 24) & 0xff,
@@ -1629,6 +1632,8 @@
case FOURCC("twos"):
case FOURCC("sowt"):
case FOURCC("alac"):
+ case FOURCC(".mp3"):
+ case 0x6D730055: // "ms U" mp3 audio
{
if (mIsQT && chunk_type == FOURCC("mp4a")
&& depth >= 1 && mPath[depth - 1] == FOURCC("wave")) {
@@ -2096,9 +2101,10 @@
return ERROR_MALFORMED;
}
- uint8_t buffer[256];
- if (chunk_data_size > (off64_t)sizeof(buffer)) {
- return ERROR_BUFFER_TOO_SMALL;
+ auto tmp = heapbuffer<uint8_t>(chunk_data_size);
+ uint8_t *buffer = tmp.get();
+ if (buffer == NULL) {
+ return -ENOMEM;
}
if (mDataSource->readAt(
@@ -4085,12 +4091,10 @@
return OK;
}
- if (objectTypeIndication == 0x6b) {
- // The media subtype is MP3 audio
- // Our software MP3 audio decoder may not be able to handle
- // packetized MP3 audio; for now, lets just return ERROR_UNSUPPORTED
- ALOGE("MP3 track in MP4/3GPP file is not supported");
- return ERROR_UNSUPPORTED;
+    if (objectTypeIndication == 0x6B || objectTypeIndication == 0x69) {
+        // 0x6B is MPEG-1 (MP3) audio, 0x69 is MPEG-2 audio; both decode as MP3.
+        AMediaFormat_setString(mLastTrack->meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_MPEG);
+        return OK;
}
if (mLastTrack != NULL) {
diff --git a/media/extractors/mp4/SampleIterator.cpp b/media/extractors/mp4/SampleIterator.cpp
index ec12130..2890b26 100644
--- a/media/extractors/mp4/SampleIterator.cpp
+++ b/media/extractors/mp4/SampleIterator.cpp
@@ -111,8 +111,15 @@
if ((err = getSampleSizeDirect(
firstChunkSampleIndex + i, &sampleSize)) != OK) {
ALOGE("getSampleSizeDirect return error");
- mCurrentChunkSampleSizes.clear();
- return err;
+            // The stsc sample count is out of sync with the stsz sample count.
+            if (err == ERROR_OUT_OF_RANGE) {
+                ALOGW("stsc samples(%d) not in sync with stsz samples(%d)", mSamplesPerChunk, i);
+                mSamplesPerChunk = i;
+                break;
+            } else {
+ mCurrentChunkSampleSizes.clear();
+ return err;
+ }
}
mCurrentChunkSampleSizes.push(sampleSize);
diff --git a/media/libmedia/BufferingSettings.cpp b/media/libmedia/BufferingSettings.cpp
index 271a238..1cc30c2 100644
--- a/media/libmedia/BufferingSettings.cpp
+++ b/media/libmedia/BufferingSettings.cpp
@@ -27,26 +27,6 @@
: mInitialMarkMs(kNoMark),
mResumePlaybackMarkMs(kNoMark) { }
-status_t BufferingSettings::readFromParcel(const Parcel* parcel) {
- if (parcel == nullptr) {
- return BAD_VALUE;
- }
- mInitialMarkMs = parcel->readInt32();
- mResumePlaybackMarkMs = parcel->readInt32();
-
- return OK;
-}
-
-status_t BufferingSettings::writeToParcel(Parcel* parcel) const {
- if (parcel == nullptr) {
- return BAD_VALUE;
- }
- parcel->writeInt32(mInitialMarkMs);
- parcel->writeInt32(mResumePlaybackMarkMs);
-
- return OK;
-}
-
String8 BufferingSettings::toString() const {
String8 s;
s.appendFormat(
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index e2eccdd..ea06665 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -179,7 +179,8 @@
{
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
- buffering.writeToParcel(&data);
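+        // BufferingSettings is no longer Parcelable; serialize its two fields manually.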
+ data.writeInt32(buffering.mInitialMarkMs);
+ data.writeInt32(buffering.mResumePlaybackMarkMs);
remote()->transact(SET_BUFFERING_SETTINGS, data, &reply);
return reply.readInt32();
}
@@ -194,7 +195,8 @@
remote()->transact(GET_BUFFERING_SETTINGS, data, &reply);
status_t err = reply.readInt32();
if (err == OK) {
- err = buffering->readFromParcel(&reply);
+ buffering->mInitialMarkMs = reply.readInt32();
+ buffering->mResumePlaybackMarkMs = reply.readInt32();
}
return err;
}
@@ -696,7 +698,8 @@
case SET_BUFFERING_SETTINGS: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
BufferingSettings buffering;
- buffering.readFromParcel(&data);
+ buffering.mInitialMarkMs = data.readInt32();
+ buffering.mResumePlaybackMarkMs = data.readInt32();
reply->writeInt32(setBufferingSettings(buffering));
return NO_ERROR;
} break;
@@ -706,7 +709,8 @@
status_t err = getBufferingSettings(&buffering);
reply->writeInt32(err);
if (err == OK) {
- buffering.writeToParcel(reply);
+ reply->writeInt32(buffering.mInitialMarkMs);
+ reply->writeInt32(buffering.mResumePlaybackMarkMs);
}
return NO_ERROR;
} break;
diff --git a/media/libmedia/include/media/BufferingSettings.h b/media/libmedia/include/media/BufferingSettings.h
index d2a3e40..d97cc00 100644
--- a/media/libmedia/include/media/BufferingSettings.h
+++ b/media/libmedia/include/media/BufferingSettings.h
@@ -17,11 +17,11 @@
#ifndef ANDROID_BUFFERING_SETTINGS_H
#define ANDROID_BUFFERING_SETTINGS_H
-#include <binder/Parcelable.h>
+#include <utils/String8.h>
namespace android {
-struct BufferingSettings : public Parcelable {
+struct BufferingSettings {
static const int kNoMark = -1;
int mInitialMarkMs;
@@ -32,9 +32,6 @@
BufferingSettings();
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
-
String8 toString() const;
};
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index b3f7404..00f537d 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -21,7 +21,6 @@
"libgui",
"liblog",
"libmedia_omx",
- "libstagefright_foundation",
"libui",
"libutils",
@@ -55,6 +54,7 @@
"libmediaplayer2-protos",
"libmedia_player2_util",
"libprotobuf-cpp-lite",
+ "libstagefright_foundation_without_imemory",
"libstagefright_nuplayer2",
"libstagefright_player2",
"libstagefright_rtsp",
diff --git a/media/libmediaplayer2/JMedia2HTTPConnection.cpp b/media/libmediaplayer2/JMedia2HTTPConnection.cpp
index d264a7f..e1baa10 100644
--- a/media/libmediaplayer2/JMedia2HTTPConnection.cpp
+++ b/media/libmediaplayer2/JMedia2HTTPConnection.cpp
@@ -21,11 +21,10 @@
#include <mediaplayer2/JavaVMHelper.h>
#include <mediaplayer2/JMedia2HTTPConnection.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <nativehelper/ScopedLocalRef.h>
+#include <nativehelper/scoped_local_ref.h>
#include "log/log.h"
#include "jni.h"
-#include <nativehelper/JNIHelp.h>
namespace android {
diff --git a/media/libmediaplayer2/JMedia2HTTPService.cpp b/media/libmediaplayer2/JMedia2HTTPService.cpp
index 264c15d..20e3573 100644
--- a/media/libmediaplayer2/JMedia2HTTPService.cpp
+++ b/media/libmediaplayer2/JMedia2HTTPService.cpp
@@ -25,8 +25,7 @@
#include <mediaplayer2/JMedia2HTTPConnection.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <nativehelper/JNIHelp.h>
-#include <nativehelper/ScopedLocalRef.h>
+#include <nativehelper/scoped_local_ref.h>
namespace android {
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
index 53f2fb1..ae7ac59 100644
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -1124,8 +1124,10 @@
// completed) so the state change to "prepared" might not have happened yet (e.g., buffering).
// Still, we can allow prepareDrm for the use case of being called in OnDrmInfoListener.
if (!(mCurrentState & (MEDIA_PLAYER2_PREPARING | MEDIA_PLAYER2_PREPARED))) {
- ALOGE("prepareDrm is called in the wrong state (%d).", mCurrentState);
- return INVALID_OPERATION;
+ ALOGW("prepareDrm(%lld) called in non-prepare state(%d)", (long long)srcId, mCurrentState);
+ if (srcId == mSrcId) {
+ return INVALID_OPERATION;
+ }
}
if (drmSessionId.isEmpty()) {
diff --git a/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.cpp b/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.cpp
index bbd22bc..89703de 100644
--- a/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.cpp
+++ b/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.cpp
@@ -22,7 +22,6 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/Utils.h>
-#include <nativehelper/JNIHelp.h>
#include <utils/Log.h>
#include "log/log.h"
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index df1ffde..6d69d50 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -678,7 +678,7 @@
msg->setSize("buffer-ix", index);
sp<ABuffer> buffer = mCSDsToSubmit.itemAt(0);
- ALOGI("[%s] resubmitting CSD", mComponentName.c_str());
+ ALOGV("[%s] resubmitting CSD", mComponentName.c_str());
msg->setBuffer("buffer", buffer);
mCSDsToSubmit.removeAt(0);
if (!onInputBufferFetched(msg)) {
@@ -749,7 +749,7 @@
reply->setSize("size", size);
if (eos) {
- ALOGI("[%s] saw output EOS", mIsAudio ? "audio" : "video");
+ ALOGV("[%s] saw output EOS", mIsAudio ? "audio" : "video");
buffer->meta()->setInt32("eos", true);
reply->setInt32("eos", true);
@@ -1029,7 +1029,7 @@
int64_t resumeAtMediaTimeUs;
if (extra->findInt64(
"resume-at-mediaTimeUs", &resumeAtMediaTimeUs)) {
- ALOGI("[%s] suppressing rendering until %lld us",
+ ALOGV("[%s] suppressing rendering until %lld us",
mComponentName.c_str(), (long long)resumeAtMediaTimeUs);
mSkipRenderingUntilMediaTimeUs = resumeAtMediaTimeUs;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index ba3ebaa..a820445 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -329,7 +329,7 @@
}
status_t NuPlayerDriver::start() {
- ALOGD("start(%p), state is %d, eos is %d", this, mState, mAtEOS);
+ ALOGV("start(%p), state is %d, eos is %d", this, mState, mAtEOS);
Mutex::Autolock autoLock(mLock);
return start_l();
}
@@ -471,7 +471,7 @@
}
status_t NuPlayerDriver::seekTo(int msec, MediaPlayerSeekMode mode) {
- ALOGD("seekTo(%p) (%d ms, %d) at state %d", this, msec, mode, mState);
+ ALOGV("seekTo(%p) (%d ms, %d) at state %d", this, msec, mode, mState);
Mutex::Autolock autoLock(mLock);
int64_t seekTimeUs = msec * 1000LL;
@@ -965,7 +965,7 @@
void NuPlayerDriver::notifyListener_l(
int msg, int ext1, int ext2, const Parcel *in) {
- ALOGD("notifyListener_l(%p), (%d, %d, %d, %d), loop setting(%d, %d)",
+ ALOGV("notifyListener_l(%p), (%d, %d, %d, %d), loop setting(%d, %d)",
this, msg, ext1, ext2, (in == NULL ? -1 : (int)in->dataSize()), mAutoLoop, mLooping);
switch (msg) {
case MEDIA_PLAYBACK_COMPLETE:
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index 1a0c3b1..010c1aa 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -308,6 +308,9 @@
ALOGE("Failed to set BufferItemConsumer buffer dataSpace");
return AMEDIA_ERROR_UNKNOWN;
}
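+    // Mark the consumer as protected so it can receive protected (secure) buffers.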
+ if (mUsage & AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT) {
+ gbConsumer->setConsumerIsProtected(true);
+ }
mSurface = new Surface(mProducer, /*controlledByApp*/true);
if (mSurface == nullptr) {
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 6ab6369..8455e54 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1136,7 +1136,8 @@
// if controller flag is set (Note that controller == TRUE => EFFECT_FLAG_VOLUME_CTRL set)
if (isProcessEnabled() &&
((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL ||
- (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) {
+ (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND ||
+ (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_MONITOR)) {
uint32_t volume[2];
uint32_t *pVolume = NULL;
uint32_t size = sizeof(volume);
@@ -1331,6 +1332,7 @@
case EFFECT_FLAG_VOLUME_NONE: s.append("none"); break;
case EFFECT_FLAG_VOLUME_CTRL: s.append("implements control"); break;
case EFFECT_FLAG_VOLUME_IND: s.append("requires indication"); break;
+ case EFFECT_FLAG_VOLUME_MONITOR: s.append("monitors volume"); break;
default: s.append("unknown/reserved"); break;
}
s.append(", ");
@@ -2277,7 +2279,7 @@
}
// then indicate volume to all other effects in chain.
// Pass altered volume to effects before volume controller
- // and requested volume to effects after controller
+    // and requested volume to effects after the controller or with the volume monitor flag
uint32_t lVol = newLeft;
uint32_t rVol = newRight;
@@ -2290,7 +2292,12 @@
lVol = *left;
rVol = *right;
}
- mEffects[i]->setVolume(&lVol, &rVol, false);
+        // Pass the requested volume through unaltered if this is a volume monitor module
+ if (mEffects[i]->isVolumeMonitor()) {
+ mEffects[i]->setVolume(left, right, false);
+ } else {
+ mEffects[i]->setVolume(&lVol, &rVol, false);
+ }
}
*left = newLeft;
*right = newRight;
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 15a26ea..58ce351 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -134,6 +134,9 @@
bool isVolumeControl() const
{ return (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK)
== EFFECT_FLAG_VOLUME_CTRL; }
+ bool isVolumeMonitor() const
+ { return (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK)
+ == EFFECT_FLAG_VOLUME_MONITOR; }
status_t setOffloaded(bool offloaded, audio_io_handle_t io);
bool isOffloaded() const;
void addEffectToHal_l();
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 55d4db4..c880e67 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -257,15 +257,21 @@
void AudioInputDescriptor::close()
{
if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+        // Clean up any still-active clients (this can happen if close() is called
+        // to force clients to reconnect).
+ for (const auto &client : getClientIterable()) {
+ if (client->active()) {
+ ALOGW("%s client with port ID %d still active on input %d",
+ __func__, client->portId(), mId);
+ setClientActive(client, false);
+ stop();
+ }
+ }
+
mClientInterface->closeInput(mIoHandle);
LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
__FUNCTION__, mProfile->curOpenCount);
- // do not call stop() here as stop() is supposed to be called after
- // setClientActive(client, false) and we don't know how many clients
- // are still active at this time
- if (isActive()) {
- mProfile->curActiveCount--;
- }
+
mProfile->curOpenCount--;
LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < mProfile->curActiveCount,
"%s(%d): mProfile->curOpenCount %d < mProfile->curActiveCount %d.",
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 57328f0..78b3f45 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -563,6 +563,17 @@
void SwAudioOutputDescriptor::close()
{
if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+        // Clean up any still-active clients (this can happen if close() is called
+        // to force clients to reconnect).
+ for (const auto &client : getClientIterable()) {
+ if (client->active()) {
+ ALOGW("%s client with port ID %d still active on output %d",
+ __func__, client->portId(), mId);
+ setClientActive(client, false);
+ stop();
+ }
+ }
+
AudioParameter param;
param.add(String8("closing"), String8("true"));
mClientInterface->setParameters(mIoHandle, param.toString());
@@ -571,11 +582,6 @@
LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
__FUNCTION__, mProfile->curOpenCount);
- // do not call stop() here as stop() is supposed to be called after setClientActive(false)
- // and we don't know how many streams are still active at this time
- if (isActive()) {
- mProfile->curActiveCount--;
- }
mProfile->curOpenCount--;
mIoHandle = AUDIO_IO_HANDLE_NONE;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 1bc4ec8..dc5b238 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -51,12 +51,6 @@
mEncodedFormats.add(AUDIO_FORMAT_AC3);
mEncodedFormats.add(AUDIO_FORMAT_IEC61937);
}
- // For backward compatibility always indicate support for SBC and AAC if no
- // supported format is listed in the configuration file
- if ((type & AUDIO_DEVICE_OUT_ALL_A2DP) != 0 && mEncodedFormats.isEmpty()) {
- mEncodedFormats.add(AUDIO_FORMAT_SBC);
- mEncodedFormats.add(AUDIO_FORMAT_AAC);
- }
}
audio_port_handle_t DeviceDescriptor::getId() const
@@ -102,11 +96,19 @@
if (!device_has_encoding_capability(type())) {
return true;
}
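+    // No encoded formats are listed, so there is no specific format to select.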
+ if (mEncodedFormats.isEmpty()) {
+ return true;
+ }
+
return (mCurrentEncodedFormat != AUDIO_FORMAT_DEFAULT);
}
bool DeviceDescriptor::supportsFormat(audio_format_t format)
{
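+    // An empty encoded format list means the device places no format restrictions.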
+ if (mEncodedFormats.isEmpty()) {
+ return true;
+ }
+
for (const auto& devFormat : mEncodedFormats) {
if (devFormat == format) {
return true;
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 851dd69..a090479 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -39,6 +39,8 @@
"api1/client2/CaptureSequencer.cpp",
"api1/client2/ZslProcessor.cpp",
"api2/CameraDeviceClient.cpp",
+ "api2/CompositeStream.cpp",
+ "api2/DepthCompositeStream.cpp",
"device1/CameraHardwareInterface.cpp",
"device3/Camera3Device.cpp",
"device3/Camera3Stream.cpp",
@@ -65,6 +67,7 @@
],
shared_libs: [
+ "libdl",
"libui",
"liblog",
"libutilscallstack",
@@ -108,6 +111,8 @@
"system/media/private/camera/include",
"frameworks/native/include/media/openmax",
"frameworks/av/media/ndk",
+ "external/dynamic_depth/includes",
+ "external/dynamic_depth/internal",
],
export_include_dirs: ["."],
@@ -116,6 +121,42 @@
"-Wall",
"-Wextra",
"-Werror",
+ "-Wno-ignored-qualifiers",
+ ],
+
+}
+
+cc_library_shared {
+ name: "libdepthphoto",
+
+ srcs: [
+ "common/DepthPhotoProcessor.cpp",
+ ],
+
+ shared_libs: [
+ "libimage_io",
+ "libdynamic_depth",
+ "libxml2",
+ "liblog",
+ "libutilscallstack",
+ "libutils",
+ "libcutils",
+ "libjpeg",
+ "libmemunreachable",
+ ],
+
+ include_dirs: [
+ "external/dynamic_depth/includes",
+ "external/dynamic_depth/internal",
+ ],
+
+ export_include_dirs: ["."],
+
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ "-Wno-ignored-qualifiers",
],
}
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index b7020fe..e6f75f4 100755
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -62,6 +62,10 @@
}
}
+void JpegProcessor::onBufferRequestForFrameNumber(uint64_t /*frameNumber*/, int /*streamId*/) {
+ // Intentionally left empty
+}
+
void JpegProcessor::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
// Intentionally left empty
}
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
index 7187ad9..2ee930e 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -25,6 +25,7 @@
#include <gui/CpuConsumer.h>
#include "camera/CameraMetadata.h"
+#include "device3/Camera3StreamBufferListener.h"
namespace android {
@@ -53,12 +54,16 @@
// Camera3StreamBufferListener implementation
void onBufferAcquired(const BufferInfo& bufferInfo) override;
void onBufferReleased(const BufferInfo& bufferInfo) override;
+ void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId) override;
status_t updateStream(const Parameters ¶ms);
status_t deleteStream();
int getStreamId() const;
void dump(int fd, const Vector<String16>& args) const;
+
+ static size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize);
+
private:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
wp<CameraDeviceBase> mDevice;
@@ -82,7 +87,6 @@
virtual bool threadLoop();
status_t processNewCapture(bool captureSuccess);
- size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize);
};
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 46fbc3e..9e203da 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -33,6 +33,8 @@
#include <camera_metadata_hidden.h>
+#include "DepthCompositeStream.h"
+
// Convenience methods for constructing binder::Status objects for error returns
#define STATUS_ERROR(errorCode, errorString) \
@@ -143,6 +145,7 @@
binder::Status CameraDeviceClient::insertGbpLocked(const sp<IGraphicBufferProducer>& gbp,
SurfaceMap* outSurfaceMap, Vector<int32_t>* outputStreamIds, int32_t *currentStreamId) {
+ int compositeIdx;
int idx = mStreamMap.indexOfKey(IInterface::asBinder(gbp));
// Trying to submit request with surface that wasn't created
@@ -152,6 +155,11 @@
__FUNCTION__, mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
"Request targets Surface that is not part of current capture session");
+ } else if ((compositeIdx = mCompositeStreamMap.indexOfKey(IInterface::asBinder(gbp)))
+ != NAME_NOT_FOUND) {
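+        // Requests targeting a composite stream are redirected to its internal streams.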
+ mCompositeStreamMap.valueAt(compositeIdx)->insertGbp(outSurfaceMap, outputStreamIds,
+ currentStreamId);
+ return binder::Status::ok();
}
const StreamSurfaceId& streamSurfaceId = mStreamMap.valueAt(idx);
@@ -489,6 +497,17 @@
mCameraIdStr.string(), strerror(-err), err);
ALOGE("%s: %s", __FUNCTION__, msg.string());
res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ } else {
+ for (size_t i = 0; i < mCompositeStreamMap.size(); ++i) {
+ err = mCompositeStreamMap.valueAt(i)->configureStream();
+            if (err != OK) {
+ String8 msg = String8::format("Camera %s: Error configuring composite "
+ "streams: %s (%d)", mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ break;
+ }
+ }
}
return res;
@@ -692,8 +711,35 @@
return res;
if (!isStreamInfoValid) {
- mapStreamInfo(streamInfo, static_cast<camera3_stream_rotation_t> (it.getRotation()),
- physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ if (camera3::DepthCompositeStream::isDepthCompositeStream(surface)) {
+ // We need to take in to account that composite streams can have
+ // additional internal camera streams.
+ std::vector<OutputStreamInfo> compositeStreams;
+ ret = camera3::DepthCompositeStream::getCompositeStreamInfo(streamInfo,
+ mDevice->info(), &compositeStreams);
+ if (ret != OK) {
+ String8 msg = String8::format(
+ "Camera %s: Failed adding depth composite streams: %s (%d)",
+ mCameraIdStr.string(), strerror(-ret), ret);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+
+ if (compositeStreams.size() > 1) {
+ streamCount += compositeStreams.size() - 1;
+ streamConfiguration.streams.resize(streamCount);
+ }
+
+ for (const auto& compositeStream : compositeStreams) {
+ mapStreamInfo(compositeStream,
+ static_cast<camera3_stream_rotation_t> (it.getRotation()),
+ physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ }
+ } else {
+ mapStreamInfo(streamInfo,
+ static_cast<camera3_stream_rotation_t> (it.getRotation()),
+ physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ }
isStreamInfoValid = true;
}
}
@@ -743,6 +789,7 @@
bool isInput = false;
std::vector<sp<IBinder>> surfaces;
ssize_t dIndex = NAME_NOT_FOUND;
+ ssize_t compositeIndex = NAME_NOT_FOUND;
if (mInputStream.configured && mInputStream.id == streamId) {
isInput = true;
@@ -762,6 +809,13 @@
}
}
+ for (size_t i = 0; i < mCompositeStreamMap.size(); ++i) {
+ if (streamId == mCompositeStreamMap.valueAt(i)->getStreamId()) {
+ compositeIndex = i;
+ break;
+ }
+ }
+
if (surfaces.empty() && dIndex == NAME_NOT_FOUND) {
String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no such"
" stream created yet", mCameraIdStr.string(), streamId);
@@ -791,6 +845,19 @@
if (dIndex != NAME_NOT_FOUND) {
mDeferredStreams.removeItemsAt(dIndex);
}
+
+ if (compositeIndex != NAME_NOT_FOUND) {
+ status_t ret;
+ if ((ret = mCompositeStreamMap.valueAt(compositeIndex)->deleteStream())
+ != OK) {
+ String8 msg = String8::format("Camera %s: Unexpected error %s (%d) when "
+ "deleting composite stream %d", mCameraIdStr.string(), strerror(-err), err,
+ streamId);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ mCompositeStreamMap.removeItemsAt(compositeIndex);
+ }
}
}
@@ -870,11 +937,25 @@
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
std::vector<int> surfaceIds;
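+        // Depth composite outputs are backed by internal streams managed by the
+        // CompositeStream rather than by a single device stream.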
- err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
- streamInfo.height, streamInfo.format, streamInfo.dataSpace,
- static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
- isShared);
+ if (!camera3::DepthCompositeStream::isDepthCompositeStream(surfaces[0])) {
+ err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
+ streamInfo.height, streamInfo.format, streamInfo.dataSpace,
+ static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
+ &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
+ isShared);
+ } else {
+ sp<CompositeStream> compositeStream = new camera3::DepthCompositeStream(mDevice,
+ getRemoteCallback());
+ err = compositeStream->createStream(surfaces, deferredConsumer, streamInfo.width,
+ streamInfo.height, streamInfo.format,
+ static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
+ &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
+ isShared);
+ if (err == OK) {
+ mCompositeStreamMap.add(IInterface::asBinder(surfaces[0]->getIGraphicBufferProducer()),
+ compositeStream);
+ }
+ }
if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
@@ -1808,7 +1889,14 @@
// Thread safe. Don't bother locking.
sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
- if (remoteCb != 0) {
+ // Composites can have multiple internal streams. Error notifications coming from such internal
+ // streams may need to remain within camera service.
+ bool skipClientNotification = false;
+ for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
+ skipClientNotification |= mCompositeStreamMap.valueAt(i)->onError(errorCode, resultExtras);
+ }
+
+ if ((remoteCb != 0) && (!skipClientNotification)) {
remoteCb->onDeviceError(errorCode, resultExtras);
}
}
@@ -1901,6 +1989,10 @@
remoteCb->onResultReceived(result.mMetadata, result.mResultExtras,
result.mPhysicalMetadatas);
}
+
+ for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
+ mCompositeStreamMap.valueAt(i)->onResultAvailable(result);
+ }
}
binder::Status CameraDeviceClient::checkPidStatus(const char* checkLocation) {
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 17a0983..1c5abb0 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -26,8 +26,10 @@
#include "CameraService.h"
#include "common/FrameProcessorBase.h"
#include "common/Camera2ClientBase.h"
+#include "CompositeStream.h"
using android::camera3::OutputStreamInfo;
+using android::camera3::CompositeStream;
namespace android {
@@ -314,6 +316,8 @@
// stream ID -> outputStreamInfo mapping
std::unordered_map<int32_t, OutputStreamInfo> mStreamInfoMap;
+ KeyedVector<sp<IBinder>, sp<CompositeStream>> mCompositeStreamMap;
+
static const int32_t MAX_SURFACES_PER_STREAM = 4;
sp<CameraProviderManager> mProviderManager;
};
diff --git a/services/camera/libcameraservice/api2/CompositeStream.cpp b/services/camera/libcameraservice/api2/CompositeStream.cpp
new file mode 100644
index 0000000..796bf42
--- /dev/null
+++ b/services/camera/libcameraservice/api2/CompositeStream.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-CompositeStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "common/CameraDeviceBase.h"
+#include "CameraDeviceClient.h"
+#include "CompositeStream.h"
+
+namespace android {
+namespace camera3 {
+
+CompositeStream::CompositeStream(wp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
+ mDevice(device),
+ mRemoteCallback(cb),
+ mNumPartialResults(1),
+ mErrorState(false) {
+ sp<CameraDeviceBase> cameraDevice = device.promote();
+ if (cameraDevice.get() != nullptr) {
+ CameraMetadata staticInfo = cameraDevice->info();
+ camera_metadata_entry_t entry = staticInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+ if (entry.count > 0) {
+ mNumPartialResults = entry.data.i32[0];
+ }
+ }
+}
+
+status_t CompositeStream::createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int * id, const String8& physicalCameraId,
+ std::vector<int> * surfaceIds, int streamSetId, bool isShared) {
+ if (hasDeferredConsumer) {
+ ALOGE("%s: Deferred consumers not supported in case of composite streams!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+    if (streamSetId != camera3::CAMERA3_STREAM_SET_ID_INVALID) {
+ ALOGE("%s: Surface groups not supported in case of composite streams!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (isShared) {
+ ALOGE("%s: Shared surfaces not supported in case of composite streams!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ return createInternalStreams(consumers, hasDeferredConsumer, width, height, format, rotation, id,
+ physicalCameraId, surfaceIds, streamSetId, isShared);
+}
+
+status_t CompositeStream::deleteStream() {
+ {
+ Mutex::Autolock l(mMutex);
+ mPendingCaptureResults.clear();
+ mCaptureResults.clear();
+ mFrameNumberMap.clear();
+ mErrorFrameNumbers.clear();
+ }
+
+ return deleteInternalStreams();
+}
+
+void CompositeStream::onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId) {
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState && (streamId == getStreamId())) {
+ mPendingCaptureResults.emplace(frameNumber, CameraMetadata());
+ }
+}
+
+void CompositeStream::onBufferReleased(const BufferInfo& bufferInfo) {
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState && !bufferInfo.mError) {
+ mFrameNumberMap.emplace(bufferInfo.mFrameNumber, bufferInfo.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+}
+
+void CompositeStream::eraseResult(int64_t frameNumber) {
+ Mutex::Autolock l(mMutex);
+
+ auto it = mPendingCaptureResults.find(frameNumber);
+ if (it == mPendingCaptureResults.end()) {
+ return;
+ }
+
+    mPendingCaptureResults.erase(it);
+}
+
+void CompositeStream::onResultAvailable(const CaptureResult& result) {
+ bool resultError = false;
+ {
+ Mutex::Autolock l(mMutex);
+
+ uint64_t frameNumber = result.mResultExtras.frameNumber;
+ bool resultReady = false;
+ auto it = mPendingCaptureResults.find(frameNumber);
+ if (it != mPendingCaptureResults.end()) {
+ it->second.append(result.mMetadata);
+ if (result.mResultExtras.partialResultCount >= mNumPartialResults) {
+ auto entry = it->second.find(ANDROID_SENSOR_TIMESTAMP);
+ if (entry.count == 1) {
+ auto ts = entry.data.i64[0];
+ mCaptureResults.emplace(ts, std::make_tuple(frameNumber, it->second));
+ resultReady = true;
+ } else {
+ ALOGE("%s: Timestamp metadata entry missing for frameNumber: %" PRIu64,
+ __FUNCTION__, frameNumber);
+ resultError = true;
+ }
+ mPendingCaptureResults.erase(it);
+ }
+ }
+
+ if (resultReady) {
+ mInputReadyCondition.signal();
+ }
+ }
+
+ if (resultError) {
+ onResultError(result.mResultExtras);
+ }
+}
+
+void CompositeStream::flagAnErrorFrameNumber(int64_t frameNumber) {
+ Mutex::Autolock l(mMutex);
+ mErrorFrameNumbers.emplace(frameNumber);
+ mInputReadyCondition.signal();
+}
+
+status_t CompositeStream::registerCompositeStreamListener(int32_t streamId) {
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device.get() == nullptr) {
+ return NO_INIT;
+ }
+
+ auto ret = device->addBufferListenerForStream(streamId, this);
+ if (ret != OK) {
+ ALOGE("%s: Failed to register composite stream listener!", __FUNCTION__);
+ }
+
+ return ret;
+}
+
+bool CompositeStream::onError(int32_t errorCode, const CaptureResultExtras& resultExtras) {
+ auto ret = false;
+ switch (errorCode) {
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+ onResultError(resultExtras);
+ break;
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
+ ret = onStreamBufferError(resultExtras);
+ break;
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
+ // Invalid request, this shouldn't affect composite streams.
+ break;
+ default:
+ ALOGE("%s: Unrecoverable error: %d detected!", __FUNCTION__, errorCode);
+            {
+                Mutex::Autolock l(mMutex);
+                mErrorState = true;
+            }
+            break;
+ }
+
+ return ret;
+}
+
+void CompositeStream::notifyError(int64_t frameNumber) {
+ sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb =
+ mRemoteCallback.promote();
+
+ if ((frameNumber >= 0) && (remoteCb.get() != nullptr)) {
+ CaptureResultExtras extras;
+ extras.errorStreamId = getStreamId();
+ extras.frameNumber = frameNumber;
+ remoteCb->onDeviceError(
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER,
+ extras);
+ }
+}
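+
+// A minimal sketch of enableErrorState(), which is declared in CompositeStream.h
+// but has no definition in this new file; assumes it simply latches the error flag.
+void CompositeStream::enableErrorState() {
+    Mutex::Autolock l(mMutex);
+    mErrorState = true;
+}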
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/api2/CompositeStream.h b/services/camera/libcameraservice/api2/CompositeStream.h
new file mode 100644
index 0000000..5837745
--- /dev/null
+++ b/services/camera/libcameraservice/api2/CompositeStream.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_COMPOSITE_STREAM_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_COMPOSITE_STREAM_H
+
+#include <set>
+#include <unordered_map>
+
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
+#include <camera/CameraMetadata.h>
+#include <camera/camera2/OutputConfiguration.h>
+#include "common/CameraDeviceBase.h"
+#include "device3/Camera3StreamInterface.h"
+
+namespace android {
+
+class CameraDeviceClient;
+class CameraMetadata;
+class Surface;
+
+namespace camera3 {
+
+class CompositeStream : public camera3::Camera3StreamBufferListener {
+
+public:
+ CompositeStream(wp<CameraDeviceBase> device, wp<hardware::camera2::ICameraDeviceCallbacks> cb);
+ virtual ~CompositeStream() {}
+
+ status_t createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared);
+
+ status_t deleteStream();
+
+ // Create and register all internal camera streams.
+ virtual status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared) = 0;
+
+ // Release all internal streams and corresponding resources.
+ virtual status_t deleteInternalStreams() = 0;
+
+ // Stream configuration completed.
+ virtual status_t configureStream() = 0;
+
+ // Insert the internal composite stream id in the user capture request.
+ virtual status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap,
+ Vector<int32_t>* /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) = 0;
+
+ // Return composite stream id.
+ virtual int getStreamId() = 0;
+
+ void onResultAvailable(const CaptureResult& result);
+ bool onError(int32_t errorCode, const CaptureResultExtras& resultExtras);
+
+ // Camera3StreamBufferListener implementation
+ void onBufferAcquired(const BufferInfo& /*bufferInfo*/) override { /*Empty for now */ }
+ void onBufferReleased(const BufferInfo& bufferInfo) override;
+ void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId) override;
+
+protected:
+ status_t registerCompositeStreamListener(int32_t streamId);
+ void eraseResult(int64_t frameNumber);
+ void flagAnErrorFrameNumber(int64_t frameNumber);
+ void notifyError(int64_t frameNumber);
+
+ // Subclasses should check for buffer errors from internal streams and return 'true' in
+ // case the error notification should remain within camera service.
+ virtual bool onStreamBufferError(const CaptureResultExtras& resultExtras) = 0;
+
+ // Subclasses can decide how to handle result errors depending on whether or not the
+ // internal processing needs result data.
+ virtual void onResultError(const CaptureResultExtras& resultExtras) = 0;
+
+ // Device and/or service is in unrecoverable error state.
+ // Composite streams should behave accordingly.
+ void enableErrorState();
+
+ wp<CameraDeviceBase> mDevice;
+ wp<hardware::camera2::ICameraDeviceCallbacks> mRemoteCallback;
+
+ mutable Mutex mMutex;
+ Condition mInputReadyCondition;
+ int32_t mNumPartialResults;
+ bool mErrorState;
+
+ // Frame number to capture result map of partial pending request results.
+ std::unordered_map<uint64_t, CameraMetadata> mPendingCaptureResults;
+
+ // Timestamp to capture (frame number, result) map of completed pending request results.
+ std::unordered_map<int64_t, std::tuple<int64_t, CameraMetadata>> mCaptureResults;
+
+ // Frame number to timestamp map
+ std::unordered_map<int64_t, int64_t> mFrameNumberMap;
+
+    // Keeps a set of buffer/result frame numbers for any errors detected during processing.
+ std::set<int64_t> mErrorFrameNumbers;
+
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
new file mode 100644
index 0000000..f627b25
--- /dev/null
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -0,0 +1,802 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DepthCompositeStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include "api1/client2/JpegProcessor.h"
+#include "common/CameraProviderManager.h"
+#include "dlfcn.h"
+#include <gui/Surface.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "DepthCompositeStream.h"
+
+namespace android {
+namespace camera3 {
+
+DepthCompositeStream::DepthCompositeStream(wp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
+ CompositeStream(device, cb),
+ mBlobStreamId(-1),
+ mBlobSurfaceId(-1),
+ mDepthStreamId(-1),
+ mDepthSurfaceId(-1),
+ mBlobWidth(0),
+ mBlobHeight(0),
+ mDepthBufferAcquired(false),
+ mBlobBufferAcquired(false),
+ mProducerListener(new ProducerListener()),
+ mMaxJpegSize(-1),
+ mIsLogicalCamera(false),
+ mDepthPhotoLibHandle(nullptr),
+ mDepthPhotoProcess(nullptr) {
+ sp<CameraDeviceBase> cameraDevice = device.promote();
+ if (cameraDevice.get() != nullptr) {
+ CameraMetadata staticInfo = cameraDevice->info();
+ auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
+ if (entry.count > 0) {
+ mMaxJpegSize = entry.data.i32[0];
+ } else {
+ ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
+ }
+
+ entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
+ if (entry.count == 5) {
+ mInstrinsicCalibration.reserve(5);
+ mInstrinsicCalibration.insert(mInstrinsicCalibration.end(), entry.data.f,
+ entry.data.f + 5);
+ } else {
+ ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
+ }
+
+ entry = staticInfo.find(ANDROID_LENS_DISTORTION);
+ if (entry.count == 5) {
+ mLensDistortion.reserve(5);
+ mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
+ } else {
+ ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
+ }
+
+ entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ for (size_t i = 0; i < entry.count; ++i) {
+ uint8_t capability = entry.data.u8[i];
+ if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
+ mIsLogicalCamera = true;
+ break;
+ }
+ }
+
+ getSupportedDepthSizes(staticInfo, &mSupportedDepthSizes);
+
+ mDepthPhotoLibHandle = dlopen(camera3::kDepthPhotoLibrary, RTLD_NOW | RTLD_LOCAL);
+ if (mDepthPhotoLibHandle != nullptr) {
+ mDepthPhotoProcess = reinterpret_cast<camera3::process_depth_photo_frame> (
+ dlsym(mDepthPhotoLibHandle, camera3::kDepthPhotoProcessFunction));
+ if (mDepthPhotoProcess == nullptr) {
+ ALOGE("%s: Failed to link to depth photo process function: %s", __FUNCTION__,
+ dlerror());
+ }
+ } else {
+ ALOGE("%s: Failed to link to depth photo library: %s", __FUNCTION__, dlerror());
+ }
+
+ }
+}
+
+DepthCompositeStream::~DepthCompositeStream() {
+ mBlobConsumer.clear();
+ mBlobSurface.clear();
+ mBlobStreamId = -1;
+ mBlobSurfaceId = -1;
+ mDepthConsumer.clear();
+ mDepthSurface.clear();
+ if (mDepthPhotoLibHandle != nullptr) {
+ dlclose(mDepthPhotoLibHandle);
+ mDepthPhotoLibHandle = nullptr;
+ }
+ mDepthPhotoProcess = nullptr;
+}
+
+void DepthCompositeStream::compilePendingInputLocked() {
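+ // Pair incoming jpeg and depth buffers by their sensor timestamps: both internal
+ // streams originate from the same capture request, so a complete input frame is a
+ // single 'mPendingInputFrames' entry holding a blob buffer, a depth buffer and the
+ // capture result for the same timestamp.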
+ CpuConsumer::LockedBuffer imgBuffer;
+
+ while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
+ auto it = mInputJpegBuffers.begin();
+ auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
+ if (res == NOT_ENOUGH_DATA) {
+ // Can not lock any more buffers.
+ break;
+ } else if (res != OK) {
+ ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ mPendingInputFrames[*it].error = true;
+ mInputJpegBuffers.erase(it);
+ continue;
+ }
+
+ if (*it != imgBuffer.timestamp) {
+ ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
+ "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
+ }
+
+ if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+ (mPendingInputFrames[imgBuffer.timestamp].error)) {
+ mBlobConsumer->unlockBuffer(imgBuffer);
+ } else {
+ mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
+ mBlobBufferAcquired = true;
+ }
+ mInputJpegBuffers.erase(it);
+ }
+
+ while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
+ auto it = mInputDepthBuffers.begin();
+ auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
+ if (res == NOT_ENOUGH_DATA) {
+ // Can not lock any more buffers.
+ break;
+ } else if (res != OK) {
+ ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ mPendingInputFrames[*it].error = true;
+ mInputDepthBuffers.erase(it);
+ continue;
+ }
+
+ if (*it != imgBuffer.timestamp) {
+ ALOGW("%s: Expecting depth buffer with time stamp: %" PRId64 " received buffer with "
+ "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
+ }
+
+ if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+ (mPendingInputFrames[imgBuffer.timestamp].error)) {
+ mDepthConsumer->unlockBuffer(imgBuffer);
+ } else {
+ mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
+ mDepthBufferAcquired = true;
+ }
+ mInputDepthBuffers.erase(it);
+ }
+
+ while (!mCaptureResults.empty()) {
+ auto it = mCaptureResults.begin();
+ // Negative timestamp indicates that something went wrong during the capture result
+ // collection process.
+ if (it->first >= 0) {
+ mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
+ mPendingInputFrames[it->first].result = std::get<1>(it->second);
+ }
+ mCaptureResults.erase(it);
+ }
+
+ while (!mFrameNumberMap.empty()) {
+ auto it = mFrameNumberMap.begin();
+ mPendingInputFrames[it->second].frameNumber = it->first;
+ mFrameNumberMap.erase(it);
+ }
+
+ auto it = mErrorFrameNumbers.begin();
+ while (it != mErrorFrameNumbers.end()) {
+ bool frameFound = false;
+ for (auto &inputFrame : mPendingInputFrames) {
+ if (inputFrame.second.frameNumber == *it) {
+ inputFrame.second.error = true;
+ frameFound = true;
+ break;
+ }
+ }
+
+ if (frameFound) {
+ it = mErrorFrameNumbers.erase(it);
+ } else {
+ ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
+ *it);
+ it++;
+ }
+ }
+}
+
+bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) {
+ if (currentTs == nullptr) {
+ return false;
+ }
+
+ bool newInputAvailable = false;
+ for (const auto& it : mPendingInputFrames) {
+ if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) &&
+ (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) {
+ *currentTs = it.first;
+ newInputAvailable = true;
+ }
+ }
+
+ return newInputAvailable;
+}
+
+int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) {
+ int64_t ret = -1;
+ if (currentTs == nullptr) {
+ return ret;
+ }
+
+ for (const auto& it : mPendingInputFrames) {
+ if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
+ *currentTs = it.first;
+ ret = it.second.frameNumber;
+ }
+ }
+
+ return ret;
+}
+
+status_t DepthCompositeStream::processInputFrame(const InputFrame &inputFrame) {
+ status_t res;
+ sp<ANativeWindow> outputANW = mOutputSurface;
+ ANativeWindowBuffer *anb;
+ int fenceFd;
+ void *dstBuffer;
+
+ auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
+ inputFrame.jpegBuffer.width);
+ if (jpegSize == 0) {
+ ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
+ jpegSize = inputFrame.jpegBuffer.width;
+ }
+
+ size_t maxDepthJpegSize;
+ if (mMaxJpegSize > 0) {
+ maxDepthJpegSize = mMaxJpegSize;
+ } else {
+ maxDepthJpegSize = std::max<size_t> (jpegSize,
+ inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
+ }
+ uint8_t jpegQuality = 100;
+ auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
+ if (entry.count > 0) {
+ jpegQuality = entry.data.u8[0];
+ }
+
+ // The final depth photo will consist of the main jpeg buffer, the depth map buffer (also in
+ // jpeg format) and confidence map (jpeg as well). Assume worst case that all 3 jpeg need
+ // max jpeg size.
+ size_t finalJpegBufferSize = maxDepthJpegSize * 3;
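+ // For example, assuming a hypothetical 10 MB maximum jpeg size, this would allocate
+ // 30 MB: one worst-case jpeg each for the main image, depth map and confidence map.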
+
+ if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegBufferSize, 1))
+ != OK) {
+ ALOGE("%s: Unable to configure stream buffer dimensions"
+ " %zux%u for stream %d", __FUNCTION__, finalJpegBufferSize, 1U, mBlobStreamId);
+ return res;
+ }
+
+ res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
+ if (res != OK) {
+ ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
+ res);
+ return res;
+ }
+
+ sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
+ res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
+ if (res != OK) {
+ ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+ return res;
+ }
+
+ if ((gb->getWidth() < finalJpegBufferSize) || (gb->getHeight() != 1)) {
+ ALOGE("%s: Blob buffer size mismatch, expected %dx%d received %zux%u", __FUNCTION__,
+ gb->getWidth(), gb->getHeight(), finalJpegBufferSize, 1U);
+ outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+ return BAD_VALUE;
+ }
+
+ DepthPhotoInputFrame depthPhoto;
+ depthPhoto.mMainJpegBuffer = reinterpret_cast<const char*> (inputFrame.jpegBuffer.data);
+ depthPhoto.mMainJpegWidth = mBlobWidth;
+ depthPhoto.mMainJpegHeight = mBlobHeight;
+ depthPhoto.mMainJpegSize = jpegSize;
+ depthPhoto.mDepthMapBuffer = reinterpret_cast<uint16_t*> (inputFrame.depthBuffer.data);
+ depthPhoto.mDepthMapWidth = inputFrame.depthBuffer.width;
+ depthPhoto.mDepthMapHeight = inputFrame.depthBuffer.height;
+ depthPhoto.mDepthMapStride = inputFrame.depthBuffer.stride;
+ depthPhoto.mJpegQuality = jpegQuality;
+ depthPhoto.mIsLogical = mIsLogicalCamera;
+ depthPhoto.mMaxJpegSize = maxDepthJpegSize;
+ // The camera intrinsic calibration layout is as follows:
+ // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
+ if (mInstrinsicCalibration.size() == 5) {
+ memcpy(depthPhoto.mInstrinsicCalibration, mInstrinsicCalibration.data(),
+ sizeof(depthPhoto.mInstrinsicCalibration));
+ depthPhoto.mIsInstrinsicCalibrationValid = 1;
+ } else {
+ depthPhoto.mIsInstrinsicCalibrationValid = 0;
+ }
+ // The camera lens distortion contains the following lens correction coefficients.
+ // [kappa_1, kappa_2, kappa_3, kappa_4, kappa_5]
+ if (mLensDistortion.size() == 5) {
+ memcpy(depthPhoto.mLensDistortion, mLensDistortion.data(),
+ sizeof(depthPhoto.mLensDistortion));
+ depthPhoto.mIsLensDistortionValid = 1;
+ } else {
+ depthPhoto.mIsLensDistortionValid = 0;
+ }
+
+ size_t actualJpegSize = 0;
+ res = mDepthPhotoProcess(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
+ if (res != 0) {
+ ALOGE("%s: Depth photo processing failed: %s (%d)", __FUNCTION__, strerror(-res), res);
+ outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+ return res;
+ }
+
+ size_t finalJpegSize = actualJpegSize + sizeof(struct camera3_jpeg_blob);
+ if (finalJpegSize > finalJpegBufferSize) {
+ ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
+ outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+ return NO_MEMORY;
+ }
+
+ ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
+ uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
+ (gb->getWidth() - sizeof(struct camera3_jpeg_blob));
+ struct camera3_jpeg_blob *blob = reinterpret_cast<struct camera3_jpeg_blob*> (header);
+ blob->jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
+ blob->jpeg_size = actualJpegSize;
+ outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+
+ return res;
+}
+
+void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
+ if (inputFrame == nullptr) {
+ return;
+ }
+
+ if (inputFrame->depthBuffer.data != nullptr) {
+ mDepthConsumer->unlockBuffer(inputFrame->depthBuffer);
+ inputFrame->depthBuffer.data = nullptr;
+ mDepthBufferAcquired = false;
+ }
+
+ if (inputFrame->jpegBuffer.data != nullptr) {
+ mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
+ inputFrame->jpegBuffer.data = nullptr;
+ mBlobBufferAcquired = false;
+ }
+
+ if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
+ notifyError(inputFrame->frameNumber);
+ inputFrame->errorNotified = true;
+ }
+}
+
+void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
+ auto it = mPendingInputFrames.begin();
+ while (it != mPendingInputFrames.end()) {
+ if (it->first <= currentTs) {
+ releaseInputFrameLocked(&it->second);
+ it = mPendingInputFrames.erase(it);
+ } else {
+ it++;
+ }
+ }
+}
+
+bool DepthCompositeStream::threadLoop() {
+ int64_t currentTs = INT64_MAX;
+ bool newInputAvailable = false;
+
+ {
+ Mutex::Autolock l(mMutex);
+
+ if (mErrorState) {
+ // In case we landed in error state, return any pending buffers and
+ // halt all further processing.
+ compilePendingInputLocked();
+ releaseInputFramesLocked(currentTs);
+ return false;
+ }
+
+ while (!newInputAvailable) {
+ compilePendingInputLocked();
+ newInputAvailable = getNextReadyInputLocked(&currentTs);
+ if (!newInputAvailable) {
+ auto failingFrameNumber = getNextFailingInputLocked(&currentTs);
+ if (failingFrameNumber >= 0) {
+ // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
+ // possible for two internal stream buffers to fail. In such scenario the
+ // composite stream should notify the client about a stream buffer error only
+ // once and this information is kept within 'errorNotified'.
+ // Any present failed input frames will be removed on a subsequent call to
+ // 'releaseInputFramesLocked()'.
+ releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
+ currentTs = INT64_MAX;
+ }
+
+ auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
+ if (ret == TIMED_OUT) {
+ return true;
+ } else if (ret != OK) {
+ ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
+ strerror(-ret), ret);
+ return false;
+ }
+ }
+ }
+ }
+
+ auto res = processInputFrame(mPendingInputFrames[currentTs]);
+ Mutex::Autolock l(mMutex);
+ if (res != OK) {
+ ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__,
+ currentTs, strerror(-res), res);
+ mPendingInputFrames[currentTs].error = true;
+ }
+
+ releaseInputFramesLocked(currentTs);
+
+ return true;
+}
+
+bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) {
+ ANativeWindow *anw = surface.get();
+ status_t err;
+ int format;
+ if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+ String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
+ err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return false;
+ }
+
+ int dataspace;
+ if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
+ String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
+ err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return false;
+ }
+
+ if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) {
+ return true;
+ }
+
+ return false;
+}
+
+status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
+ if (mSupportedDepthSizes.empty()) {
+ ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ size_t depthWidth, depthHeight;
+ auto ret = getMatchingDepthSize(width, height, mSupportedDepthSizes, &depthWidth, &depthHeight);
+ if (ret != OK) {
+ ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
+ return ret;
+ }
+
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (!device.get()) {
+ ALOGE("%s: Invalid camera device!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
+ mBlobConsumer->setFrameAvailableListener(this);
+ mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
+ mBlobSurface = new Surface(producer);
+
+ ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
+ id, physicalCameraId, surfaceIds);
+ if (ret == OK) {
+ mBlobStreamId = *id;
+ mBlobSurfaceId = (*surfaceIds)[0];
+ mOutputSurface = consumers[0];
+ } else {
+ return ret;
+ }
+
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
+ mDepthConsumer->setFrameAvailableListener(this);
+ mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
+ mDepthSurface = new Surface(producer);
+ std::vector<int> depthSurfaceId;
+ ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
+ kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, &depthSurfaceId);
+ if (ret == OK) {
+ mDepthSurfaceId = depthSurfaceId[0];
+ } else {
+ return ret;
+ }
+
+ ret = registerCompositeStreamListener(getStreamId());
+ if (ret != OK) {
+ ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
+ return ret;
+ }
+
+ ret = registerCompositeStreamListener(mDepthStreamId);
+ if (ret != OK) {
+ ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
+ return ret;
+ }
+
+ mBlobWidth = width;
+ mBlobHeight = height;
+
+ return ret;
+}
+
+status_t DepthCompositeStream::configureStream() {
+ if (isRunning()) {
+ // Processing thread is already running, nothing more to do.
+ return NO_ERROR;
+ }
+
+ if ((mDepthPhotoLibHandle == nullptr) || (mDepthPhotoProcess == nullptr)) {
+ ALOGE("%s: Depth photo library is not present!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ if (mOutputSurface.get() == nullptr) {
+ ALOGE("%s: No valid output surface set!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
+ if (res != OK) {
+ ALOGE("%s: Unable to connect to native window for stream %d",
+ __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
+ if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
+ != OK) {
+ ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
+ mBlobStreamId);
+ return res;
+ }
+
+ int maxProducerBuffers;
+ ANativeWindow *anw = mBlobSurface.get();
+ if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
+ ALOGE("%s: Unable to query consumer undequeued"
+ " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
+ ANativeWindow *anwConsumer = mOutputSurface.get();
+ int maxConsumerBuffers;
+ if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
+ &maxConsumerBuffers)) != OK) {
+ ALOGE("%s: Unable to query consumer undequeued"
+ " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
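+ // Size the output queue to cover both the producer- and consumer-side minimum
+ // undequeued buffer requirements, so neither side can starve the other.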
+ if ((res = native_window_set_buffer_count(
+ anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
+ ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
+ run("DepthCompositeStreamProc");
+
+ return NO_ERROR;
+}
+
+status_t DepthCompositeStream::deleteInternalStreams() {
+ // The 'CameraDeviceClient' parent will delete the blob stream
+ requestExit();
+
+ auto ret = join();
+ if (ret != OK) {
+ ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
+ strerror(-ret), ret);
+ }
+
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (!device.get()) {
+ ALOGE("%s: Invalid camera device!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ if (mDepthStreamId >= 0) {
+ ret = device->deleteStream(mDepthStreamId);
+ mDepthStreamId = -1;
+ }
+
+ return ret;
+}
+
+void DepthCompositeStream::onFrameAvailable(const BufferItem& item) {
+ if (item.mDataSpace == kJpegDataSpace) {
+ ALOGV("%s: Jpeg buffer with ts: %" PRIu64 " ms. arrived!",
+ __func__, ns2ms(item.mTimestamp));
+
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState) {
+ mInputJpegBuffers.push_back(item.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+ } else if (item.mDataSpace == kDepthMapDataSpace) {
+ ALOGV("%s: Depth buffer with ts: %" PRIu64 " ms. arrived!", __func__,
+ ns2ms(item.mTimestamp));
+
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState) {
+ mInputDepthBuffers.push_back(item.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+ } else {
+ ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
+ }
+}
+
+status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
+ Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
+ if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) {
+ (*outSurfaceMap)[mDepthStreamId] = std::vector<size_t>();
+ outputStreamIds->push_back(mDepthStreamId);
+ }
+ (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId);
+
+ if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
+ (*outSurfaceMap)[mBlobStreamId] = std::vector<size_t>();
+ outputStreamIds->push_back(mBlobStreamId);
+ }
+ (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);
+
+ if (currentStreamId != nullptr) {
+ *currentStreamId = mBlobStreamId;
+ }
+
+ return NO_ERROR;
+}
+
+void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
+ // Processing can continue even in case of result errors.
+ // At the moment depth composite stream processing relies mainly on static camera
+ // characteristics data. The actual result data can be used for the jpeg quality but
+ // in case it is absent we can default to maximum.
+ eraseResult(resultExtras.frameNumber);
+}
+
+bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
+ bool ret = false;
+ // Buffer errors concerning internal composite streams should not be directly visible to
+ // camera clients. They must only receive a single buffer error with the public composite
+ // stream id.
+ if ((resultExtras.errorStreamId == mDepthStreamId) ||
+ (resultExtras.errorStreamId == mBlobStreamId)) {
+ flagAnErrorFrameNumber(resultExtras.frameNumber);
+ ret = true;
+ }
+
+ return ret;
+}
+
+status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height,
+ const std::vector<std::tuple<size_t, size_t>>& supportedDepthSizes,
+ size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) {
+ if ((depthWidth == nullptr) || (depthHeight == nullptr)) {
+ return BAD_VALUE;
+ }
+
+ float arTol = CameraProviderManager::kDepthARTolerance;
+ *depthWidth = *depthHeight = 0;
+
+ float aspectRatio = static_cast<float> (width) / static_cast<float> (height);
+ for (const auto& it : supportedDepthSizes) {
+ auto currentWidth = std::get<0>(it);
+ auto currentHeight = std::get<1>(it);
+ if ((currentWidth == width) && (currentHeight == height)) {
+ *depthWidth = width;
+ *depthHeight = height;
+ break;
+ } else {
+ float currentRatio = static_cast<float> (currentWidth) /
+ static_cast<float> (currentHeight);
+ auto currentSize = currentWidth * currentHeight;
+ auto oldSize = (*depthWidth) * (*depthHeight);
+ if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) {
+ *depthWidth = currentWidth;
+ *depthHeight = currentHeight;
+ }
+ }
+ }
+
+ return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
+}
+
+void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch,
+ std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
+ if (depthSizes == nullptr) {
+ return;
+ }
+
+ auto entry = ch.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS);
+ if (entry.count > 0) {
+ // Depth stream dimensions have four int32_t components
+ // (pixelformat, width, height, type)
+ size_t entryCount = entry.count / 4;
+ depthSizes->reserve(entryCount);
+ for (size_t i = 0; i < entry.count; i += 4) {
+ if ((entry.data.i32[i] == kDepthMapPixelFormat) &&
+ (entry.data.i32[i+3] ==
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
+ depthSizes->push_back(std::make_tuple(entry.data.i32[i+1],
+ entry.data.i32[i+2]));
+ }
+ }
+ }
+}
+
+status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+ const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
+ if (compositeOutput == nullptr) {
+ return BAD_VALUE;
+ }
+
+ std::vector<std::tuple<size_t, size_t>> depthSizes;
+ getSupportedDepthSizes(ch, &depthSizes);
+ if (depthSizes.empty()) {
+ ALOGE("%s: No depth stream configurations present", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ size_t depthWidth, depthHeight;
+ auto ret = getMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes, &depthWidth,
+ &depthHeight);
+ if (ret != OK) {
+ ALOGE("%s: No matching depth stream size found", __FUNCTION__);
+ return ret;
+ }
+
+ compositeOutput->clear();
+ compositeOutput->insert(compositeOutput->end(), 2, streamInfo);
+
+ // Jpeg/Blob stream info
+ (*compositeOutput)[0].dataSpace = kJpegDataSpace;
+ (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+
+ // Depth stream info
+ (*compositeOutput)[1].width = depthWidth;
+ (*compositeOutput)[1].height = depthHeight;
+ (*compositeOutput)[1].format = kDepthMapPixelFormat;
+ (*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
+ (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+
+ return NO_ERROR;
+}
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.h b/services/camera/libcameraservice/api2/DepthCompositeStream.h
new file mode 100644
index 0000000..e8fe517
--- /dev/null
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_DEPTH_COMPOSITE_STREAM_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_DEPTH_COMPOSITE_STREAM_H
+
+#include "common/DepthPhotoProcessor.h"
+#include <dynamic_depth/imaging_model.h>
+#include <dynamic_depth/depth_map.h>
+
+#include <gui/IProducerListener.h>
+#include <gui/CpuConsumer.h>
+
+#include "CompositeStream.h"
+
+using dynamic_depth::DepthMap;
+using dynamic_depth::Item;
+using dynamic_depth::ImagingModel;
+
+namespace android {
+
+class CameraDeviceClient;
+class CameraMetadata;
+class Surface;
+
+namespace camera3 {
+
+class DepthCompositeStream : public CompositeStream, public Thread,
+ public CpuConsumer::FrameAvailableListener {
+
+public:
+ DepthCompositeStream(wp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb);
+ ~DepthCompositeStream() override;
+
+ static bool isDepthCompositeStream(const sp<Surface> &surface);
+
+ // CompositeStream overrides
+ status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared) override;
+ status_t deleteInternalStreams() override;
+ status_t configureStream() override;
+ status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap, Vector<int32_t>* /*out*/outputStreamIds,
+ int32_t* /*out*/currentStreamId) override;
+ int getStreamId() override { return mBlobStreamId; }
+
+ // CpuConsumer listener implementation
+ void onFrameAvailable(const BufferItem& item) override;
+
+ // Return stream information about the internal camera streams
+ static status_t getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+ const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/);
+
+protected:
+
+ bool threadLoop() override;
+ bool onStreamBufferError(const CaptureResultExtras& resultExtras) override;
+ void onResultError(const CaptureResultExtras& resultExtras) override;
+
+private:
+ struct InputFrame {
+ CpuConsumer::LockedBuffer depthBuffer;
+ CpuConsumer::LockedBuffer jpegBuffer;
+ CameraMetadata result;
+ bool error;
+ bool errorNotified;
+ int64_t frameNumber;
+
+ InputFrame() : error(false), errorNotified(false), frameNumber(-1) { }
+ };
+
+ // Helper methods
+ static void getSupportedDepthSizes(const CameraMetadata& ch,
+ std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/);
+ static status_t getMatchingDepthSize(size_t width, size_t height,
+ const std::vector<std::tuple<size_t, size_t>>& supportedDepthSizes,
+ size_t *depthWidth /*out*/, size_t *depthHeight /*out*/);
+
+ // Dynamic depth processing
+ status_t encodeGrayscaleJpeg(size_t width, size_t height, uint8_t *in, void *out,
+ const size_t maxOutSize, uint8_t jpegQuality, size_t &actualSize);
+ std::unique_ptr<DepthMap> processDepthMapFrame(const CpuConsumer::LockedBuffer &depthMapBuffer,
+ size_t maxJpegSize, uint8_t jpegQuality,
+ std::vector<std::unique_ptr<Item>>* items /*out*/);
+ std::unique_ptr<ImagingModel> getImagingModel();
+ status_t processInputFrame(const InputFrame &inputFrame);
+
+ // Buffer/Results handling
+ void compilePendingInputLocked();
+ void releaseInputFrameLocked(InputFrame *inputFrame /*out*/);
+ void releaseInputFramesLocked(int64_t currentTs);
+
+ // Find first complete and valid frame with smallest timestamp
+ bool getNextReadyInputLocked(int64_t *currentTs /*inout*/);
+
+ // Find next failing frame number with smallest timestamp and return respective frame number
+ int64_t getNextFailingInputLocked(int64_t *currentTs /*inout*/);
+
+ static const nsecs_t kWaitDuration = 10000000; // 10 ms
+ static const auto kDepthMapPixelFormat = HAL_PIXEL_FORMAT_Y16;
+ static const auto kDepthMapDataSpace = HAL_DATASPACE_DEPTH;
+ static const auto kJpegDataSpace = HAL_DATASPACE_V0_JFIF;
+
+ struct ProducerListener : public BnProducerListener {
+ // ProducerListener implementation
+ void onBufferReleased() override { /*No impl. for now*/ };
+ };
+
+ int mBlobStreamId, mBlobSurfaceId, mDepthStreamId, mDepthSurfaceId;
+ size_t mBlobWidth, mBlobHeight;
+ sp<CpuConsumer> mBlobConsumer, mDepthConsumer;
+ bool mDepthBufferAcquired, mBlobBufferAcquired;
+ sp<Surface> mDepthSurface, mBlobSurface, mOutputSurface;
+ sp<ProducerListener> mProducerListener;
+
+ ssize_t mMaxJpegSize;
+ std::vector<std::tuple<size_t, size_t>> mSupportedDepthSizes;
+ std::vector<float> mInstrinsicCalibration, mLensDistortion;
+ bool mIsLogicalCamera;
+ void* mDepthPhotoLibHandle;
+ process_depth_photo_frame mDepthPhotoProcess;
+
+ // Keep all incoming Depth buffer timestamps pending further processing.
+ std::vector<int64_t> mInputDepthBuffers;
+
+ // Keep all incoming Jpeg/Blob buffer timestamps pending further processing.
+ std::vector<int64_t> mInputJpegBuffers;
+
+ // Map of all input frames pending further processing.
+ std::unordered_map<int64_t, InputFrame> mPendingInputFrames;
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index a9cbe72..3059b07 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -24,6 +24,8 @@
#include <algorithm>
#include <chrono>
+#include "common/DepthPhotoProcessor.h"
+#include <dlfcn.h>
#include <future>
#include <inttypes.h>
#include <hardware/camera_common.h>
@@ -58,6 +60,8 @@
} // anonymous namespace
+const float CameraProviderManager::kDepthARTolerance = .1f;
+
CameraProviderManager::HardwareServiceInteractionProxy
CameraProviderManager::sHardwareServiceInteractionProxy{};
@@ -500,6 +504,275 @@
}
}
+void CameraProviderManager::ProviderInfo::DeviceInfo3::getSupportedSizes(
+ const CameraMetadata& ch, uint32_t tag, android_pixel_format_t format,
+ std::vector<std::tuple<size_t, size_t>> *sizes/*out*/) {
+ if (sizes == nullptr) {
+ return;
+ }
+
+ auto scalerDims = ch.find(tag);
+ if (scalerDims.count > 0) {
+ // Scaler entry contains 4 elements (format, width, height, type)
+ for (size_t i = 0; i < scalerDims.count; i += 4) {
+ if ((scalerDims.data.i32[i] == format) &&
+ (scalerDims.data.i32[i+3] ==
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
+ sizes->push_back(std::make_tuple(scalerDims.data.i32[i+1],
+ scalerDims.data.i32[i+2]));
+ }
+ }
+ }
+}
+
+void CameraProviderManager::ProviderInfo::DeviceInfo3::getSupportedDurations(
+ const CameraMetadata& ch, uint32_t tag, android_pixel_format_t format,
+ const std::vector<std::tuple<size_t, size_t>>& sizes,
+ std::vector<int64_t> *durations/*out*/) {
+ if (durations == nullptr) {
+ return;
+ }
+
+ auto availableDurations = ch.find(tag);
+ if (availableDurations.count > 0) {
+ // Duration entry contains 4 elements (format, width, height, duration)
+ for (size_t i = 0; i < availableDurations.count; i += 4) {
+ for (const auto& size : sizes) {
+ int64_t width = std::get<0>(size);
+ int64_t height = std::get<1>(size);
+ if ((availableDurations.data.i64[i] == format) &&
+ (availableDurations.data.i64[i+1] == width) &&
+ (availableDurations.data.i64[i+2] == height)) {
+ durations->push_back(availableDurations.data.i64[i+3]);
+ }
+ }
+ }
+ }
+}
+
+void CameraProviderManager::ProviderInfo::DeviceInfo3::getSupportedDynamicDepthDurations(
+ const std::vector<int64_t>& depthDurations, const std::vector<int64_t>& blobDurations,
+ std::vector<int64_t> *dynamicDepthDurations /*out*/) {
+ if ((dynamicDepthDurations == nullptr) || (depthDurations.size() != blobDurations.size())) {
+ return;
+ }
+
+ // Unfortunately there is no direct way to calculate the dynamic depth stream duration.
+ // Processing time on camera service side can vary greatly depending on multiple
+ // variables which are not under our control. Make a guesstimate by taking the maximum
+ // corresponding duration value from depth and blob.
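+ // For example, a depth min frame duration of 33.3ms paired with a blob duration of
+ // 50ms is reported as a 50ms dynamic depth duration.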
+ auto depthDuration = depthDurations.begin();
+ auto blobDuration = blobDurations.begin();
+ dynamicDepthDurations->reserve(depthDurations.size());
+ while ((depthDuration != depthDurations.end()) && (blobDuration != blobDurations.end())) {
+ dynamicDepthDurations->push_back(std::max(*depthDuration, *blobDuration));
+ depthDuration++; blobDuration++;
+ }
+}
+
+void CameraProviderManager::ProviderInfo::DeviceInfo3::getSupportedDynamicDepthSizes(
+ const std::vector<std::tuple<size_t, size_t>>& blobSizes,
+ const std::vector<std::tuple<size_t, size_t>>& depthSizes,
+ std::vector<std::tuple<size_t, size_t>> *dynamicDepthSizes /*out*/,
+ std::vector<std::tuple<size_t, size_t>> *internalDepthSizes /*out*/) {
+ if (dynamicDepthSizes == nullptr || internalDepthSizes == nullptr) {
+ return;
+ }
+
+ // The dynamic depth spec. does not mention how closely the aspect ratios need to
+ // match. Use the provider-wide tolerance.
+ float ARTolerance = kDepthARTolerance;
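+ // For example, with the current 0.1 tolerance a hypothetical 4032x3024 blob size
+ // (aspect ratio ~1.33) would match a 640x480 depth map (~1.33) but not a
+ // 640x360 one (~1.78).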
+
+ for (const auto& blobSize : blobSizes) {
+ float jpegAR = static_cast<float> (std::get<0>(blobSize)) /
+ static_cast<float>(std::get<1>(blobSize));
+ bool found = false;
+ for (const auto& depthSize : depthSizes) {
+ if (depthSize == blobSize) {
+ internalDepthSizes->push_back(depthSize);
+ found = true;
+ break;
+ } else {
+ float depthAR = static_cast<float> (std::get<0>(depthSize)) /
+ static_cast<float>(std::get<1>(depthSize));
+ if (std::fabs(jpegAR - depthAR) <= ARTolerance) {
+ internalDepthSizes->push_back(depthSize);
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (found) {
+ dynamicDepthSizes->push_back(blobSize);
+ }
+ }
+}
+
+bool CameraProviderManager::ProviderInfo::DeviceInfo3::isDepthPhotoLibraryPresent() {
+ static bool libraryPresent = false;
+ static bool initialized = false;
+ if (initialized) {
+ return libraryPresent;
+ } else {
+ initialized = true;
+ }
+
+ void* depthLibHandle = dlopen(camera3::kDepthPhotoLibrary, RTLD_NOW | RTLD_LOCAL);
+ if (depthLibHandle == nullptr) {
+ return false;
+ }
+
+ auto processFunc = dlsym(depthLibHandle, camera3::kDepthPhotoProcessFunction);
+ if (processFunc != nullptr) {
+ libraryPresent = true;
+ } else {
+ libraryPresent = false;
+ }
+ dlclose(depthLibHandle);
+
+ return libraryPresent;
+}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::addDynamicDepthTags() {
+ uint32_t depthExclTag = ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE;
+ uint32_t depthSizesTag = ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS;
+ auto& c = mCameraCharacteristics;
+ std::vector<std::tuple<size_t, size_t>> supportedBlobSizes, supportedDepthSizes,
+ supportedDynamicDepthSizes, internalDepthSizes;
+ auto chTags = c.find(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
+ if (chTags.count == 0) {
+ ALOGE("%s: Supported camera characteristics is empty!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ bool isDepthExclusivePresent = std::find(chTags.data.i32, chTags.data.i32 + chTags.count,
+ depthExclTag) != (chTags.data.i32 + chTags.count);
+ bool isDepthSizePresent = std::find(chTags.data.i32, chTags.data.i32 + chTags.count,
+ depthSizesTag) != (chTags.data.i32 + chTags.count);
+ if (!(isDepthExclusivePresent && isDepthSizePresent)) {
+ // No depth support, nothing more to do.
+ return OK;
+ }
+
+ auto depthExclusiveEntry = c.find(depthExclTag);
+ if (depthExclusiveEntry.count > 0) {
+ if (depthExclusiveEntry.data.u8[0] != ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE_FALSE) {
+ // Depth support is exclusive, nothing more to do.
+ return OK;
+ }
+ } else {
+ ALOGE("%s: Advertised depth exclusive tag but value is not present!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ getSupportedSizes(c, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, HAL_PIXEL_FORMAT_BLOB,
+ &supportedBlobSizes);
+ getSupportedSizes(c, depthSizesTag, HAL_PIXEL_FORMAT_Y16, &supportedDepthSizes);
+ if (supportedBlobSizes.empty() || supportedDepthSizes.empty()) {
+ // Nothing to do in this case.
+ return OK;
+ }
+
+ getSupportedDynamicDepthSizes(supportedBlobSizes, supportedDepthSizes,
+ &supportedDynamicDepthSizes, &internalDepthSizes);
+ if (supportedDynamicDepthSizes.empty()) {
+ ALOGE("%s: No dynamic depth size matched!", __func__);
+ // Nothing more to do.
+ return OK;
+ }
+
+ if (!isDepthPhotoLibraryPresent()) {
+ // Depth photo processing library is not present, nothing more to do.
+ return OK;
+ }
+
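+ // Dynamic depth stream configurations reuse the scaler 4-tuple layout:
+ // (format, width, height, input/output type).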
+ std::vector<int32_t> dynamicDepthEntries;
+ for (const auto& it : supportedDynamicDepthSizes) {
+ int32_t entry[4] = {HAL_PIXEL_FORMAT_BLOB, static_cast<int32_t> (std::get<0>(it)),
+ static_cast<int32_t> (std::get<1>(it)),
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT };
+ dynamicDepthEntries.insert(dynamicDepthEntries.end(), entry, entry + 4);
+ }
+
+ std::vector<int64_t> depthMinDurations, depthStallDurations;
+ std::vector<int64_t> blobMinDurations, blobStallDurations;
+ std::vector<int64_t> dynamicDepthMinDurations, dynamicDepthStallDurations;
+
+ getSupportedDurations(c, ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS,
+ HAL_PIXEL_FORMAT_Y16, internalDepthSizes, &depthMinDurations);
+ getSupportedDurations(c, ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+ HAL_PIXEL_FORMAT_BLOB, supportedDynamicDepthSizes, &blobMinDurations);
+ if (blobMinDurations.empty() || depthMinDurations.empty() ||
+ (depthMinDurations.size() != blobMinDurations.size())) {
+ ALOGE("%s: Unexpected number of available depth min durations! %zu vs. %zu",
+ __FUNCTION__, depthMinDurations.size(), blobMinDurations.size());
+ return BAD_VALUE;
+ }
+
+ getSupportedDurations(c, ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS,
+ HAL_PIXEL_FORMAT_Y16, internalDepthSizes, &depthStallDurations);
+ getSupportedDurations(c, ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
+ HAL_PIXEL_FORMAT_BLOB, supportedDynamicDepthSizes, &blobStallDurations);
+ if (blobStallDurations.empty() || depthStallDurations.empty() ||
+ (depthStallDurations.size() != blobStallDurations.size())) {
+ ALOGE("%s: Unexpected number of available depth stall durations! %zu vs. %zu",
+ __FUNCTION__, depthStallDurations.size(), blobStallDurations.size());
+ return BAD_VALUE;
+ }
+
+ getSupportedDynamicDepthDurations(depthMinDurations, blobMinDurations,
+ &dynamicDepthMinDurations);
+ getSupportedDynamicDepthDurations(depthStallDurations, blobStallDurations,
+ &dynamicDepthStallDurations);
+ if (dynamicDepthMinDurations.empty() || dynamicDepthStallDurations.empty() ||
+ (dynamicDepthMinDurations.size() != dynamicDepthStallDurations.size())) {
+ ALOGE("%s: Unexpected number of dynamic depth stall/min durations! %zu vs. %zu",
+ __FUNCTION__, dynamicDepthMinDurations.size(), dynamicDepthStallDurations.size());
+ return BAD_VALUE;
+ }
+
+ std::vector<int64_t> dynamicDepthMinDurationEntries;
+ auto itDuration = dynamicDepthMinDurations.begin();
+ auto itSize = supportedDynamicDepthSizes.begin();
+ while (itDuration != dynamicDepthMinDurations.end()) {
+ int64_t entry[4] = {HAL_PIXEL_FORMAT_BLOB, static_cast<int64_t> (std::get<0>(*itSize)),
+ static_cast<int64_t> (std::get<1>(*itSize)), *itDuration};
+ dynamicDepthMinDurationEntries.insert(dynamicDepthMinDurationEntries.end(), entry,
+ entry + 4);
+ itDuration++; itSize++;
+ }
+
+ std::vector<int64_t> dynamicDepthStallDurationEntries;
+ itDuration = dynamicDepthStallDurations.begin();
+ itSize = supportedDynamicDepthSizes.begin();
+ while (itDuration != dynamicDepthStallDurations.end()) {
+ int64_t entry[4] = {HAL_PIXEL_FORMAT_BLOB, static_cast<int64_t> (std::get<0>(*itSize)),
+ static_cast<int64_t> (std::get<1>(*itSize)), *itDuration};
+ dynamicDepthStallDurationEntries.insert(dynamicDepthStallDurationEntries.end(), entry,
+ entry + 4);
+ itDuration++; itSize++;
+ }
+
+ c.update(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS,
+ dynamicDepthEntries.data(), dynamicDepthEntries.size());
+ c.update(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS,
+ dynamicDepthMinDurationEntries.data(), dynamicDepthMinDurationEntries.size());
+ c.update(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS,
+ dynamicDepthStallDurationEntries.data(), dynamicDepthStallDurationEntries.size());
+
+ std::vector<int32_t> supportedChTags;
+ supportedChTags.reserve(chTags.count + 3);
+ supportedChTags.insert(supportedChTags.end(), chTags.data.i32,
+ chTags.data.i32 + chTags.count);
+ supportedChTags.push_back(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS);
+ supportedChTags.push_back(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS);
+ supportedChTags.push_back(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS);
+ c.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, supportedChTags.data(),
+ supportedChTags.size());
+
+ return OK;
+}
+
status_t CameraProviderManager::ProviderInfo::DeviceInfo3::fixupMonochromeTags() {
status_t res = OK;
auto& c = mCameraCharacteristics;
@@ -1442,6 +1715,12 @@
__FUNCTION__, strerror(-res), res);
return;
}
+ res = addDynamicDepthTags();
+ if (OK != res) {
+ ALOGE("%s: Failed appending dynamic depth tags: %s (%d)", __FUNCTION__, strerror(-res),
+ res);
+ return;
+ }
camera_metadata_entry flashAvailable =
mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
if (flashAvailable.count == 1 &&
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 0966743..fbd7d2e 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -265,6 +265,8 @@
bool isLogicalCamera(const std::string& id, std::vector<std::string>* physicalCameraIds);
bool isHiddenPhysicalCamera(const std::string& cameraId);
+
+ static const float kDepthARTolerance;
private:
// All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
mutable std::mutex mInterfaceMutex;
@@ -470,6 +472,23 @@
std::unordered_map<std::string, CameraMetadata> mPhysicalCameraCharacteristics;
void queryPhysicalCameraIds();
status_t fixupMonochromeTags();
+ status_t addDynamicDepthTags();
+ static void getSupportedSizes(const CameraMetadata& ch, uint32_t tag,
+ android_pixel_format_t format,
+ std::vector<std::tuple<size_t, size_t>> *sizes /*out*/);
+ void getSupportedDurations( const CameraMetadata& ch, uint32_t tag,
+ android_pixel_format_t format,
+ const std::vector<std::tuple<size_t, size_t>>& sizes,
+ std::vector<int64_t> *durations/*out*/);
+ void getSupportedDynamicDepthDurations(const std::vector<int64_t>& depthDurations,
+ const std::vector<int64_t>& blobDurations,
+ std::vector<int64_t> *dynamicDepthDurations /*out*/);
+ static bool isDepthPhotoLibraryPresent();
+ static void getSupportedDynamicDepthSizes(
+ const std::vector<std::tuple<size_t, size_t>>& blobSizes,
+ const std::vector<std::tuple<size_t, size_t>>& depthSizes,
+ std::vector<std::tuple<size_t, size_t>> *dynamicDepthSizes /*out*/,
+ std::vector<std::tuple<size_t, size_t>> *internalDepthSizes /*out*/);
status_t removeAvailableKeys(CameraMetadata& c, const std::vector<uint32_t>& keys,
uint32_t keyTag);
};
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
new file mode 100644
index 0000000..a945aca
--- /dev/null
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -0,0 +1,340 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DepthPhotoProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include "DepthPhotoProcessor.h"
+
+#include <dynamic_depth/camera.h>
+#include <dynamic_depth/cameras.h>
+#include <dynamic_depth/container.h>
+#include <dynamic_depth/device.h>
+#include <dynamic_depth/dimension.h>
+#include <dynamic_depth/dynamic_depth.h>
+#include <dynamic_depth/point.h>
+#include <dynamic_depth/pose.h>
+#include <dynamic_depth/profile.h>
+#include <dynamic_depth/profiles.h>
+#include <jpeglib.h>
+#include <math.h>
+#include <sstream>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <xmpmeta/xmp_data.h>
+#include <xmpmeta/xmp_writer.h>
+
+using dynamic_depth::Camera;
+using dynamic_depth::Cameras;
+using dynamic_depth::CameraParams;
+using dynamic_depth::Container;
+using dynamic_depth::DepthFormat;
+using dynamic_depth::DepthMap;
+using dynamic_depth::DepthMapParams;
+using dynamic_depth::DepthUnits;
+using dynamic_depth::Device;
+using dynamic_depth::DeviceParams;
+using dynamic_depth::Dimension;
+using dynamic_depth::Image;
+using dynamic_depth::ImagingModel;
+using dynamic_depth::ImagingModelParams;
+using dynamic_depth::Item;
+using dynamic_depth::Pose;
+using dynamic_depth::Profile;
+using dynamic_depth::Profiles;
+
+namespace android {
+namespace camera3 {
+
+status_t encodeGrayscaleJpeg(size_t width, size_t height, uint8_t *in, void *out,
+ const size_t maxOutSize, uint8_t jpegQuality, size_t &actualSize) {
+ status_t ret;
+ // libjpeg is a C library so we use C-style "inheritance" by
+ // putting libjpeg's jpeg_destination_mgr first in our custom
+ // struct. This allows us to cast jpeg_destination_mgr* to
+ // CustomJpegDestMgr* when we get it passed to us in a callback.
+ struct CustomJpegDestMgr : public jpeg_destination_mgr {
+ JOCTET *mBuffer;
+ size_t mBufferSize;
+ size_t mEncodedSize;
+ bool mSuccess;
+ } dmgr;
+
+ jpeg_compress_struct cinfo = {};
+ jpeg_error_mgr jerr;
+
+ // Initialize error handling with standard callbacks, but
+ // then override output_message (to print to ALOG) and
+ // error_exit to set a flag and print a message instead
+ // of killing the whole process.
+ cinfo.err = jpeg_std_error(&jerr);
+
+ cinfo.err->output_message = [](j_common_ptr cinfo) {
+ char buffer[JMSG_LENGTH_MAX];
+
+ /* Create the message */
+ (*cinfo->err->format_message)(cinfo, buffer);
+ ALOGE("libjpeg error: %s", buffer);
+ };
+
+ cinfo.err->error_exit = [](j_common_ptr cinfo) {
+ (*cinfo->err->output_message)(cinfo);
+ if(cinfo->client_data) {
+ auto & dmgr = *static_cast<CustomJpegDestMgr*>(cinfo->client_data);
+ dmgr.mSuccess = false;
+ }
+ };
+
+ // Now that we initialized some callbacks, let's create our compressor
+ jpeg_create_compress(&cinfo);
+ dmgr.mBuffer = static_cast<JOCTET*>(out);
+ dmgr.mBufferSize = maxOutSize;
+ dmgr.mEncodedSize = 0;
+ dmgr.mSuccess = true;
+ cinfo.client_data = static_cast<void*>(&dmgr);
+
+ // These lambdas become C-style function pointers and as per C++11 spec
+ // may not capture anything.
+ dmgr.init_destination = [](j_compress_ptr cinfo) {
+ auto & dmgr = static_cast<CustomJpegDestMgr&>(*cinfo->dest);
+ dmgr.next_output_byte = dmgr.mBuffer;
+ dmgr.free_in_buffer = dmgr.mBufferSize;
+ ALOGV("%s:%d jpeg start: %p [%zu]",
+ __FUNCTION__, __LINE__, dmgr.mBuffer, dmgr.mBufferSize);
+ };
+
+ dmgr.empty_output_buffer = [](j_compress_ptr cinfo __unused) {
+ ALOGV("%s:%d Out of buffer", __FUNCTION__, __LINE__);
+ return 0;
+ };
+
+ dmgr.term_destination = [](j_compress_ptr cinfo) {
+ auto & dmgr = static_cast<CustomJpegDestMgr&>(*cinfo->dest);
+ dmgr.mEncodedSize = dmgr.mBufferSize - dmgr.free_in_buffer;
+ ALOGV("%s:%d Done with jpeg: %zu", __FUNCTION__, __LINE__, dmgr.mEncodedSize);
+ };
+ cinfo.dest = reinterpret_cast<struct jpeg_destination_mgr*>(&dmgr);
+ cinfo.image_width = width;
+ cinfo.image_height = height;
+ cinfo.input_components = 1;
+ cinfo.in_color_space = JCS_GRAYSCALE;
+
+ // Initialize defaults and then override what we want
+ jpeg_set_defaults(&cinfo);
+
+ jpeg_set_quality(&cinfo, jpegQuality, 1);
+ jpeg_set_colorspace(&cinfo, JCS_GRAYSCALE);
+ cinfo.raw_data_in = 0;
+ cinfo.dct_method = JDCT_IFAST;
+
+ cinfo.comp_info[0].h_samp_factor = 1;
+ cinfo.comp_info[1].h_samp_factor = 1;
+ cinfo.comp_info[2].h_samp_factor = 1;
+ cinfo.comp_info[0].v_samp_factor = 1;
+ cinfo.comp_info[1].v_samp_factor = 1;
+ cinfo.comp_info[2].v_samp_factor = 1;
+
+ jpeg_start_compress(&cinfo, TRUE);
+
+ for (size_t i = 0; i < cinfo.image_height; i++) {
+ auto currentRow = static_cast<JSAMPROW>(in + i*width);
+ jpeg_write_scanlines(&cinfo, &currentRow, /*num_lines*/1);
+ }
+
+ jpeg_finish_compress(&cinfo);
+
+ actualSize = dmgr.mEncodedSize;
+ if (dmgr.mSuccess) {
+ ret = NO_ERROR;
+ } else {
+ ret = UNKNOWN_ERROR;
+ }
+
+ return ret;
+}
+
+std::unique_ptr<dynamic_depth::DepthMap> processDepthMapFrame(DepthPhotoInputFrame inputFrame,
+ std::vector<std::unique_ptr<Item>> *items /*out*/) {
+ std::vector<float> points, confidence;
+
+ size_t pointCount = inputFrame.mDepthMapWidth * inputFrame.mDepthMapHeight;
+ points.reserve(pointCount);
+ confidence.reserve(pointCount);
+ float near = UINT16_MAX;
+ float far = .0f;
+ for (size_t i = 0; i < inputFrame.mDepthMapHeight; i++) {
+ for (size_t j = 0; j < inputFrame.mDepthMapWidth; j++) {
+ // Android densely packed depth map. The units for the range are in
+ // millimeters and need to be scaled to meters.
+ // The confidence value is encoded in the 3 most significant bits.
+ // The confidence data needs to be additionally normalized with
+ // values 1.0f, 0.0f representing maximum and minimum confidence
+ // respectively.
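+ // For example, a hypothetical raw sample of 0xE4D2 decodes to a range of
+ // 0x04D2 mm (1.234 m) with confidence bits 0x7 mapped to (7 - 1) / 7 ~= 0.857.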
+ auto value = inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j];
+ auto point = static_cast<float>(value & 0x1FFF) / 1000.f;
+ points.push_back(point);
+
+ auto conf = (value >> 13) & 0x7;
+ float normConfidence = (conf == 0) ? 1.f : (static_cast<float>(conf) - 1) / 7.f;
+ confidence.push_back(normConfidence);
+
+ if (near > point) {
+ near = point;
+ }
+ if (far < point) {
+ far = point;
+ }
+ }
+ }
+
+ if (near == far) {
+ ALOGE("%s: Near and far range values must not match!", __FUNCTION__);
+ return nullptr;
+ }
+
+ std::vector<uint8_t> pointsQuantized, confidenceQuantized;
+ pointsQuantized.reserve(pointCount); confidenceQuantized.reserve(pointCount);
+ auto pointIt = points.begin();
+ auto confidenceIt = confidence.begin();
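+ // Quantize to 8 bits using the RangeInverse encoding from the dynamic depth
+ // specification: q = 255 * (far * (d - near)) / (d * (far - near)), which allocates
+ // more of the 8-bit range to samples closer to the camera.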
+ while ((pointIt != points.end()) && (confidenceIt != confidence.end())) {
+ pointsQuantized.push_back(floorf(((far * (*pointIt - near)) /
+ (*pointIt * (far - near))) * 255.0f));
+ confidenceQuantized.push_back(floorf(*confidenceIt * 255.0f));
+ confidenceIt++; pointIt++;
+ }
+
+ DepthMapParams depthParams(DepthFormat::kRangeInverse, near, far, DepthUnits::kMeters,
+ "android/depthmap");
+ depthParams.confidence_uri = "android/confidencemap";
+ depthParams.mime = "image/jpeg";
+ depthParams.depth_image_data.resize(inputFrame.mMaxJpegSize);
+ depthParams.confidence_data.resize(inputFrame.mMaxJpegSize);
+ size_t actualJpegSize;
+ auto ret = encodeGrayscaleJpeg(inputFrame.mDepthMapWidth, inputFrame.mDepthMapHeight,
+ pointsQuantized.data(), depthParams.depth_image_data.data(), inputFrame.mMaxJpegSize,
+ inputFrame.mJpegQuality, actualJpegSize);
+ if (ret != NO_ERROR) {
+ ALOGE("%s: Depth map compression failed!", __FUNCTION__);
+ return nullptr;
+ }
+ depthParams.depth_image_data.resize(actualJpegSize);
+
+ ret = encodeGrayscaleJpeg(inputFrame.mDepthMapWidth, inputFrame.mDepthMapHeight,
+ confidenceQuantized.data(), depthParams.confidence_data.data(), inputFrame.mMaxJpegSize,
+ inputFrame.mJpegQuality, actualJpegSize);
+ if (ret != NO_ERROR) {
+ ALOGE("%s: Confidence map compression failed!", __FUNCTION__);
+ return nullptr;
+ }
+ depthParams.confidence_data.resize(actualJpegSize);
+
+ return DepthMap::FromData(depthParams, items);
+}
+
+extern "C" int processDepthPhotoFrame(DepthPhotoInputFrame inputFrame, size_t depthPhotoBufferSize,
+ void* depthPhotoBuffer /*out*/, size_t* depthPhotoActualSize /*out*/) {
+ if ((inputFrame.mMainJpegBuffer == nullptr) || (inputFrame.mDepthMapBuffer == nullptr) ||
+ (depthPhotoBuffer == nullptr) || (depthPhotoActualSize == nullptr)) {
+ return BAD_VALUE;
+ }
+
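+ // Assemble the dynamic depth container: the main jpeg becomes the primary image
+ // item, the compressed depth and confidence maps are attached as secondary items,
+ // and the device/camera metadata is serialized as XMP before everything is written
+ // out as a single composite jpeg.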
+ std::vector<std::unique_ptr<Item>> items;
+ std::vector<std::unique_ptr<Camera>> cameraList;
+ auto image = Image::FromDataForPrimaryImage("android/mainimage", &items);
+ std::unique_ptr<CameraParams> cameraParams(new CameraParams(std::move(image)));
+ if (cameraParams == nullptr) {
+ ALOGE("%s: Failed to initialize camera parameters", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ cameraParams->depth_map = processDepthMapFrame(inputFrame, &items);
+ if (cameraParams->depth_map == nullptr) {
+ ALOGE("%s: Depth map processing failed!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ // It is not possible to generate an imaging model without intrinsic calibration.
+ if (inputFrame.mIsInstrinsicCalibrationValid) {
+ // The camera intrinsic calibration layout is as follows:
+ // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
+ const dynamic_depth::Point<double> focalLength(inputFrame.mInstrinsicCalibration[0],
+ inputFrame.mInstrinsicCalibration[1]);
+ const Dimension imageSize(inputFrame.mMainJpegWidth, inputFrame.mMainJpegHeight);
+ ImagingModelParams imagingParams(focalLength, imageSize);
+ imagingParams.principal_point.x = inputFrame.mInstrinsicCalibration[2];
+ imagingParams.principal_point.y = inputFrame.mInstrinsicCalibration[3];
+ imagingParams.skew = inputFrame.mInstrinsicCalibration[4];
+
+ // The camera lens distortion contains the following lens correction coefficients.
+ // [kappa_1, kappa_2, kappa_3, kappa_4, kappa_5]
+ if (inputFrame.mIsLensDistortionValid) {
+ // According to specification the lens distortion coefficients should be ordered
+ // as [1, kappa_4, kappa_1, kappa_5, kappa_2, 0, kappa_3, 0]
+ float distortionData[] = {1.f, inputFrame.mLensDistortion[3],
+ inputFrame.mLensDistortion[0], inputFrame.mLensDistortion[4],
+ inputFrame.mLensDistortion[1], 0.f, inputFrame.mLensDistortion[2], 0.f};
+ auto distortionDataLength = sizeof(distortionData) / sizeof(distortionData[0]);
+ imagingParams.distortion.reserve(distortionDataLength);
+ imagingParams.distortion.insert(imagingParams.distortion.end(), distortionData,
+ distortionData + distortionDataLength);
+ }
+
+ cameraParams->imaging_model = ImagingModel::FromData(imagingParams);
+ }
+
+ if (inputFrame.mIsLogical) {
+ cameraParams->trait = dynamic_depth::CameraTrait::LOGICAL;
+ } else {
+ cameraParams->trait = dynamic_depth::CameraTrait::PHYSICAL;
+ }
+
+ cameraList.emplace_back(Camera::FromData(std::move(cameraParams)));
+
+ auto deviceParams = std::make_unique<DeviceParams> (Cameras::FromCameraArray(&cameraList));
+ deviceParams->container = Container::FromItems(&items);
+ std::vector<std::unique_ptr<Profile>> profileList;
+ profileList.emplace_back(Profile::FromData("DepthPhoto", {0}));
+ deviceParams->profiles = Profiles::FromProfileArray(&profileList);
+ std::unique_ptr<Device> device = Device::FromData(std::move(deviceParams));
+ if (device == nullptr) {
+ ALOGE("%s: Failed to initialize camera device", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ std::istringstream inputJpegStream(
+ std::string(inputFrame.mMainJpegBuffer, inputFrame.mMainJpegSize));
+ std::ostringstream outputJpegStream;
+ if (!WriteImageAndMetadataAndContainer(&inputJpegStream, device.get(), &outputJpegStream)) {
+ ALOGE("%s: Failed writing depth output", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ *depthPhotoActualSize = static_cast<size_t> (outputJpegStream.tellp());
+ if (*depthPhotoActualSize > depthPhotoBufferSize) {
+ ALOGE("%s: Depth photo output buffer not sufficient, needed %zu actual %zu", __FUNCTION__,
+ *depthPhotoActualSize, depthPhotoBufferSize);
+ return NO_MEMORY;
+ }
+
+ memcpy(depthPhotoBuffer, outputJpegStream.str().c_str(), *depthPhotoActualSize);
+
+ return 0;
+}
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.h b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
new file mode 100644
index 0000000..19889a1
--- /dev/null
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_DEPTH_PROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_DEPTH_PROCESSOR_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace android {
+namespace camera3 {
+
+struct DepthPhotoInputFrame {
+ const char* mMainJpegBuffer;
+ size_t mMainJpegSize;
+ size_t mMainJpegWidth, mMainJpegHeight;
+ uint16_t* mDepthMapBuffer;
+ size_t mDepthMapWidth, mDepthMapHeight, mDepthMapStride;
+ size_t mMaxJpegSize;
+ uint8_t mJpegQuality;
+ uint8_t mIsLogical;
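+ // Expected layout: [focal_length_x, focal_length_y, principal_point_x,
+ // principal_point_y, skew]; see the intrinsic calibration usage in
+ // DepthPhotoProcessor.cpp.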
+ float mInstrinsicCalibration[5];
+ uint8_t mIsInstrinsicCalibrationValid;
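+ // Expected layout: [kappa_1, kappa_2, kappa_3, kappa_4, kappa_5].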
+ float mLensDistortion[5];
+ uint8_t mIsLensDistortionValid;
+
+ DepthPhotoInputFrame() :
+ mMainJpegBuffer(nullptr),
+ mMainJpegSize(0),
+ mMainJpegWidth(0),
+ mMainJpegHeight(0),
+ mDepthMapBuffer(nullptr),
+ mDepthMapWidth(0),
+ mDepthMapHeight(0),
+ mDepthMapStride(0),
+ mMaxJpegSize(0),
+ mJpegQuality(100),
+ mIsLogical(0),
+ mInstrinsicCalibration{0.f},
+ mIsInstrinsicCalibrationValid(0),
+ mLensDistortion{0.f},
+ mIsLensDistortionValid(0) {}
+};
+
+static const char *kDepthPhotoLibrary = "libdepthphoto.so";
+static const char *kDepthPhotoProcessFunction = "processDepthPhotoFrame";
+typedef int (*process_depth_photo_frame) (DepthPhotoInputFrame /*inputFrame*/,
+ size_t /*depthPhotoBufferSize*/, void* /*depthPhotoBuffer out*/,
+ size_t* /*depthPhotoActualSize out*/);
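+
+ // Illustrative usage sketch (an assumption, not part of this change): a
+ // client would resolve the entry point at runtime via <dlfcn.h>, roughly as
+ // follows. Error handling is omitted and the variable names are hypothetical.
+ //
+ // void *handle = dlopen(kDepthPhotoLibrary, RTLD_NOW | RTLD_LOCAL);
+ // auto process = reinterpret_cast<process_depth_photo_frame>(
+ // dlsym(handle, kDepthPhotoProcessFunction));
+ // size_t actualSize = 0;
+ // int res = process(inputFrame, bufferSize, buffer, &actualSize);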
+
+}; // namespace camera3
+}; // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA_CAMERA3_DEPTH_PROCESSOR_H
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 12fbf82..99b8043 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -3119,10 +3119,12 @@
status_t res = OK;
if (it != outputSurfaces.end()) {
res = stream->returnBuffer(
- outputBuffers[i], timestamp, timestampIncreasing, it->second);
+ outputBuffers[i], timestamp, timestampIncreasing, it->second,
+ inResultExtras.frameNumber);
} else {
res = stream->returnBuffer(
- outputBuffers[i], timestamp, timestampIncreasing);
+ outputBuffers[i], timestamp, timestampIncreasing, std::vector<size_t> (),
+ inResultExtras.frameNumber);
}
// Note: stream may be deallocated at this point, if this buffer was
@@ -3139,7 +3141,8 @@
// cancel the buffer
camera3_stream_buffer_t sb = outputBuffers[i];
sb.status = CAMERA3_BUFFER_STATUS_ERROR;
- stream->returnBuffer(sb, /*timestamp*/0, timestampIncreasing);
+ stream->returnBuffer(sb, /*timestamp*/0, timestampIncreasing, std::vector<size_t> (),
+ inResultExtras.frameNumber);
// notify client buffer error
sp<NotificationListener> listener;
@@ -3279,7 +3282,8 @@
streamBuffer.stream = halStream;
switch (halStream->stream_type) {
case CAMERA3_STREAM_OUTPUT:
- res = stream->returnBuffer(streamBuffer, /*timestamp*/ 0);
+ res = stream->returnBuffer(streamBuffer, /*timestamp*/ 0,
+ /*timestampIncreasing*/true, std::vector<size_t> (), frameNumber);
if (res != OK) {
ALOGE("%s: Can't return output buffer for frame %d to"
" stream %d: %s (%d)", __FUNCTION__,
@@ -5469,6 +5473,8 @@
return TIMED_OUT;
}
}
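+ // Tell the stream's buffer listeners which frame number the upcoming
+ // buffer request belongs to.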
+ outputStream->fireBufferRequestForFrameNumber(
+ captureRequest->mResultExtras.frameNumber);
String8 physicalCameraId = outputStream->getPhysicalCameraId();
@@ -5692,7 +5698,9 @@
outputBuffers->editItemAt(i).acquire_fence = -1;
}
outputBuffers->editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
- captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0);
+ captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0,
+ /*timestampIncreasing*/true, std::vector<size_t> (),
+ captureRequest->mResultExtras.frameNumber);
}
if (sendRequestError) {
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 24d1c1b..b296513 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -656,7 +656,7 @@
status_t Camera3Stream::returnBuffer(const camera3_stream_buffer &buffer,
nsecs_t timestamp, bool timestampIncreasing,
- const std::vector<size_t>& surface_ids) {
+ const std::vector<size_t>& surface_ids, uint64_t frameNumber) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
@@ -687,7 +687,7 @@
*/
status_t res = returnBufferLocked(b, timestamp, surface_ids);
if (res == OK) {
- fireBufferListenersLocked(b, /*acquired*/false, /*output*/true);
+ fireBufferListenersLocked(b, /*acquired*/false, /*output*/true, timestamp, frameNumber);
}
// Even if returning the buffer failed, we still want to signal whoever is waiting for the
@@ -763,8 +763,21 @@
return getInputBufferProducerLocked(producer);
}
+void Camera3Stream::fireBufferRequestForFrameNumber(uint64_t frameNumber) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mLock);
+
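+ // Buffer listeners are stored as weak references; promote each entry and
+ // skip listeners that have already been destroyed.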
+ for (auto &it : mBufferListenerList) {
+ sp<Camera3StreamBufferListener> listener = it.promote();
+ if (listener.get() != nullptr) {
+ listener->onBufferRequestForFrameNumber(frameNumber, getId());
+ }
+ }
+}
+
void Camera3Stream::fireBufferListenersLocked(
- const camera3_stream_buffer& buffer, bool acquired, bool output) {
+ const camera3_stream_buffer& buffer, bool acquired, bool output, nsecs_t timestamp,
+ uint64_t frameNumber) {
List<wp<Camera3StreamBufferListener> >::iterator it, end;
// TODO: finish implementing
@@ -773,6 +786,8 @@
Camera3StreamBufferListener::BufferInfo();
info.mOutput = output;
info.mError = (buffer.status == CAMERA3_BUFFER_STATUS_ERROR);
+ info.mFrameNumber = frameNumber;
+ info.mTimestamp = timestamp;
// TODO: rest of fields
for (it = mBufferListenerList.begin(), end = mBufferListenerList.end();
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index ddba9f6..06deba9 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -332,7 +332,8 @@
*/
status_t returnBuffer(const camera3_stream_buffer &buffer,
nsecs_t timestamp, bool timestampIncreasing,
- const std::vector<size_t>& surface_ids = std::vector<size_t>());
+ const std::vector<size_t>& surface_ids = std::vector<size_t>(),
+ uint64_t frameNumber = 0);
/**
* Fill in the camera3_stream_buffer with the next valid buffer for this
@@ -430,6 +431,11 @@
*/
status_t restoreConfiguredState();
+ /**
+ * Notify the stream's buffer listeners about an incoming request with a particular frame number.
+ */
+ void fireBufferRequestForFrameNumber(uint64_t frameNumber) override;
+
protected:
const int mId;
/**
@@ -538,7 +544,7 @@
static const nsecs_t kWaitForBufferDuration = 3000000000LL; // 3000 ms
void fireBufferListenersLocked(const camera3_stream_buffer& buffer,
- bool acquired, bool output);
+ bool acquired, bool output, nsecs_t timestamp = 0, uint64_t frameNumber = 0);
List<wp<Camera3StreamBufferListener> > mBufferListenerList;
status_t cancelPrepareLocked();
diff --git a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
index 2db333d..0e6104e 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
@@ -41,6 +41,8 @@
virtual void onBufferAcquired(const BufferInfo& bufferInfo) = 0;
// Buffer was released by the HAL
virtual void onBufferReleased(const BufferInfo& bufferInfo) = 0;
+ // Notify about the frame number of an incoming buffer request
+ virtual void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId) = 0;
};
}; //namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index a84720b..7b80cbd 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -259,7 +259,8 @@
*/
virtual status_t returnBuffer(const camera3_stream_buffer &buffer,
nsecs_t timestamp, bool timestampIncreasing = true,
- const std::vector<size_t>& surface_ids = std::vector<size_t>()) = 0;
+ const std::vector<size_t>& surface_ids = std::vector<size_t>(),
+ uint64_t frameNumber = 0) = 0;
/**
* Fill in the camera3_stream_buffer with the next valid buffer for this
@@ -341,6 +342,11 @@
* Camera3Stream.
*/
virtual void setBufferFreedListener(wp<Camera3StreamBufferFreedListener> listener) = 0;
+
+ /**
+ * Notify the stream's buffer listeners about an incoming request with a particular frame number.
+ */
+ virtual void fireBufferRequestForFrameNumber(uint64_t frameNumber) = 0;
};
} // namespace camera3