Merge "codec2: trim log" into qt-r1-dev
diff --git a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
index 74548b5..9dc541c 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
@@ -144,8 +144,7 @@
// Queueing an empty WorkBundle
std::list<std::unique_ptr<C2Work>> workList;
- err = mComponent->queue(&workList);
- ASSERT_EQ(err, C2_OK);
+ mComponent->queue(&workList);
err = mComponent->reset();
ASSERT_EQ(err, C2_OK);
@@ -183,33 +182,23 @@
// Test Multiple Start Stop Reset Test
TEST_F(Codec2ComponentHidlTest, MultipleStartStopReset) {
ALOGV("Multiple Start Stop and Reset Test");
- c2_status_t err = C2_OK;
for (size_t i = 0; i < MAX_RETRY; i++) {
- err = mComponent->start();
- ASSERT_EQ(err, C2_OK);
-
- err = mComponent->stop();
- ASSERT_EQ(err, C2_OK);
+ mComponent->start();
+ mComponent->stop();
}
- err = mComponent->start();
- ASSERT_EQ(err, C2_OK);
+ ASSERT_EQ(mComponent->start(), C2_OK);
for (size_t i = 0; i < MAX_RETRY; i++) {
- err = mComponent->reset();
- ASSERT_EQ(err, C2_OK);
+ mComponent->reset();
}
- err = mComponent->start();
- ASSERT_EQ(err, C2_OK);
-
- err = mComponent->stop();
- ASSERT_EQ(err, C2_OK);
+ ASSERT_EQ(mComponent->start(), C2_OK);
+ ASSERT_EQ(mComponent->stop(), C2_OK);
// Second stop should return error
- err = mComponent->stop();
- ASSERT_NE(err, C2_OK);
+ ASSERT_NE(mComponent->stop(), C2_OK);
}
// Test Component Release API
@@ -233,8 +222,7 @@
ASSERT_EQ(failures.size(), 0u);
for (size_t i = 0; i < MAX_RETRY; i++) {
- err = mComponent->release();
- ASSERT_EQ(err, C2_OK);
+ mComponent->release();
}
}
@@ -332,14 +320,12 @@
timeConsumed = getNowUs() - startTime;
ALOGV("mComponent->queue() timeConsumed=%" PRId64 " us", timeConsumed);
CHECK_TIMEOUT(timeConsumed, QUEUE_TIME_OUT, "queue()");
- ASSERT_EQ(err, C2_OK);
startTime = getNowUs();
err = mComponent->flush(C2Component::FLUSH_COMPONENT, &workList);
timeConsumed = getNowUs() - startTime;
ALOGV("mComponent->flush() timeConsumed=%" PRId64 " us", timeConsumed);
CHECK_TIMEOUT(timeConsumed, FLUSH_TIME_OUT, "flush()");
- ASSERT_EQ(err, C2_OK);
startTime = getNowUs();
err = mComponent->stop();
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index 0e20b47..5e28750 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -124,6 +124,13 @@
mTimestampUs = 0u;
mTimestampDevTest = false;
if (mCompName == unknown_comp) mDisableTest = true;
+
+ C2SecureModeTuning secureModeTuning{};
+ mComponent->query({ &secureModeTuning }, {}, C2_MAY_BLOCK, nullptr);
+ if (secureModeTuning.value == C2Config::SM_READ_PROTECTED) {
+ mDisableTest = true;
+ }
+
if (mDisableTest) std::cout << "[ WARN ] Test Disabled \n";
}
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index 6bcf840..c1f5a92 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -284,15 +284,16 @@
std::list<std::unique_ptr<C2Work>>& workQueue,
std::list<uint64_t>& flushedIndices,
std::shared_ptr<C2BlockPool>& graphicPool,
- std::ifstream& eleStream, uint32_t frameID,
- uint32_t nFrames, uint32_t nWidth, int32_t nHeight,
- bool flushed = false,bool signalEOS = true) {
+ std::ifstream& eleStream, bool& disableTest,
+ uint32_t frameID, uint32_t nFrames, uint32_t nWidth,
+ int32_t nHeight, bool flushed = false, bool signalEOS = true) {
typedef std::unique_lock<std::mutex> ULock;
uint32_t maxRetry = 0;
int bytesCount = nWidth * nHeight * 3 >> 1;
int32_t timestampIncr = ENCODER_TIMESTAMP_INCREMENT;
uint64_t timestamp = 0;
+ c2_status_t err = C2_OK;
while (1) {
if (nFrames == 0) break;
uint32_t flags = 0;
@@ -333,16 +334,21 @@
ASSERT_EQ(eleStream.gcount(), bytesCount);
}
std::shared_ptr<C2GraphicBlock> block;
- ASSERT_EQ(
- C2_OK,
- graphicPool->fetchGraphicBlock(
+ err = graphicPool->fetchGraphicBlock(
nWidth, nHeight, HAL_PIXEL_FORMAT_YV12,
- {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block));
+ {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
+ if (err != C2_OK) {
+ fprintf(stderr, "fetchGraphicBlock failed : %d\n", err);
+ disableTest = true;
+ break;
+ }
+
ASSERT_TRUE(block);
// Graphic View
C2GraphicView view = block->map().get();
if (view.error() != C2_OK) {
fprintf(stderr, "C2GraphicBlock::map() failed : %d", view.error());
+ disableTest = true;
break;
}
@@ -420,8 +426,16 @@
ASSERT_EQ(mComponent->start(), C2_OK);
ASSERT_NO_FATAL_FAILURE(
encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
- mFlushedIndices, mGraphicPool, eleStream,
+ mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
0, ENC_NUM_FRAMES, nWidth, nHeight, false, signalEOS));
+ // mDisableTest will be set if buffer was not fetched properly.
+ // This may happen when resolution is not proper but config succeeded
+ // In these cases, we skip encoding the input stream
+ if (mDisableTest) {
+ std::cout << "[ WARN ] Test Disabled \n";
+ ASSERT_EQ(mComponent->stop(), C2_OK);
+ return;
+ }
// If EOS is not sent, sending empty input with EOS flag
inputFrames = ENC_NUM_FRAMES;
@@ -531,8 +545,17 @@
ALOGV("mURL : %s", mURL);
ASSERT_NO_FATAL_FAILURE(
encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
- mFlushedIndices, mGraphicPool, eleStream,
+ mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
0, numFramesFlushed, nWidth, nHeight));
+ // mDisableTest will be set if buffer was not fetched properly.
+ // This may happen when resolution is not proper but config succeeded
+ // In these cases, we skip encoding the input stream
+ if (mDisableTest) {
+ std::cout << "[ WARN ] Test Disabled \n";
+ ASSERT_EQ(mComponent->stop(), C2_OK);
+ return;
+ }
+
std::list<std::unique_ptr<C2Work>> flushedWork;
c2_status_t err =
mComponent->flush(C2Component::FLUSH_COMPONENT, &flushedWork);
@@ -561,10 +584,19 @@
mFlushedIndices.clear();
ASSERT_NO_FATAL_FAILURE(
encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
- mFlushedIndices, mGraphicPool, eleStream,
+ mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
numFramesFlushed, numFrames - numFramesFlushed,
nWidth, nHeight, true));
eleStream.close();
+ // mDisableTest will be set if buffer was not fetched properly.
+ // This may happen when resolution is not proper but config succeeded
+ // In these cases, we skip encoding the input stream
+ if (mDisableTest) {
+ std::cout << "[ WARN ] Test Disabled \n";
+ ASSERT_EQ(mComponent->stop(), C2_OK);
+ return;
+ }
+
err = mComponent->flush(C2Component::FLUSH_COMPONENT, &flushedWork);
ASSERT_EQ(err, C2_OK);
ASSERT_NO_FATAL_FAILURE(
@@ -607,19 +639,19 @@
ASSERT_NO_FATAL_FAILURE(
encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
- mFlushedIndices, mGraphicPool, eleStream,
+ mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
0, 1, nWidth, nHeight, false, false));
// Feed larger input buffer.
ASSERT_NO_FATAL_FAILURE(
encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
- mFlushedIndices, mGraphicPool, eleStream,
+ mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
1, 1, nWidth*2, nHeight*2, false, false));
// Feed smaller input buffer.
ASSERT_NO_FATAL_FAILURE(
encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
- mFlushedIndices, mGraphicPool, eleStream,
+ mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
2, 1, nWidth/2, nHeight/2, false, true));
// blocking call to ensures application to Wait till all the inputs are
@@ -629,15 +661,13 @@
waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue));
if (mFramesReceived != 3) {
- ALOGE("Input buffer count and Output buffer count mismatch");
- ALOGE("framesReceived : %d inputFrames : 3", mFramesReceived);
- ASSERT_TRUE(false);
+ std::cout << "[ WARN ] Component didn't receive all buffers back \n";
+ ALOGW("framesReceived : %d inputFrames : 3", mFramesReceived);
}
if (mFailedWorkReceived == 0) {
- ALOGE("Expected failed frame count mismatch");
- ALOGE("failedFramesReceived : %d", mFailedWorkReceived);
- ASSERT_TRUE(false);
+ std::cout << "[ WARN ] Expected failed frame count mismatch \n";
+ ALOGW("failedFramesReceived : %d", mFailedWorkReceived);
}
ASSERT_EQ(mComponent->stop(), C2_OK);
@@ -665,8 +695,17 @@
ASSERT_NO_FATAL_FAILURE(
encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
- mFlushedIndices, mGraphicPool, eleStream, 0,
- MAX_INPUT_BUFFERS, nWidth, nHeight));
+ mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
+ 0, MAX_INPUT_BUFFERS, nWidth, nHeight, false, true));
+
+ // mDisableTest will be set if buffer was not fetched properly.
+ // This may happen when resolution is not proper but config succeeded
+ // In these cases, we skip encoding the input stream
+ if (mDisableTest) {
+ std::cout << "[ WARN ] Test Disabled \n";
+ ASSERT_EQ(mComponent->stop(), C2_OK);
+ return;
+ }
ALOGD("Waiting for input consumption");
ASSERT_NO_FATAL_FAILURE(
@@ -676,6 +715,7 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
ASSERT_EQ(mComponent->reset(), C2_OK);
}
+
INSTANTIATE_TEST_CASE_P(NonStdSizes, Codec2VideoEncResolutionTest, ::testing::Values(
std::make_pair(52, 18),
std::make_pair(365, 365),
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index 2b417a6..5ed54f1 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -959,9 +959,9 @@
std::shared_ptr<Codec2Client::InputSurface> Codec2Client::CreateInputSurface(
char const* serviceName) {
- uint32_t inputSurfaceSetting = ::android::base::GetUintProperty(
- "debug.stagefright.c2inputsurface", uint32_t(0));
- if (inputSurfaceSetting == 0) {
+ int32_t inputSurfaceSetting = ::android::base::GetIntProperty(
+ "debug.stagefright.c2inputsurface", int32_t(0));
+ if (inputSurfaceSetting <= 0) {
return nullptr;
}
size_t index = GetServiceNames().size();
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index 8ae80ee..9c84c71 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -9,6 +9,7 @@
"CCodecConfig.cpp",
"Codec2Buffer.cpp",
"Codec2InfoBuilder.cpp",
+ "Omx2IGraphicBufferSource.cpp",
"PipelineWatcher.cpp",
"ReflectedParamUpdater.cpp",
"SkipCutBuffer.cpp",
@@ -41,8 +42,10 @@
"libmedia",
"libmedia_omx",
"libsfplugin_ccodec_utils",
+ "libstagefright_bufferqueue_helper",
"libstagefright_codecbase",
"libstagefright_foundation",
+ "libstagefright_omx",
"libstagefright_omx_utils",
"libstagefright_xmlparser",
"libui",
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 895be1a..8223273 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -45,6 +45,7 @@
#include "CCodec.h"
#include "CCodecBufferChannel.h"
#include "InputSurfaceWrapper.h"
+#include "Omx2IGraphicBufferSource.h"
extern "C" android::PersistentSurface *CreateInputSurface();
@@ -1071,6 +1072,7 @@
OmxStatus s;
android::sp<HGraphicBufferProducer> gbp;
android::sp<HGraphicBufferSource> gbs;
+
using ::android::hardware::Return;
Return<void> transStatus = omx->createInputSurface(
[&s, &gbp, &gbs](
@@ -1856,15 +1858,30 @@
// Create Codec 2.0 input surface
extern "C" android::PersistentSurface *CreateInputSurface() {
+ using namespace android;
// Attempt to create a Codec2's input surface.
- std::shared_ptr<android::Codec2Client::InputSurface> inputSurface =
- android::Codec2Client::CreateInputSurface();
+ std::shared_ptr<Codec2Client::InputSurface> inputSurface =
+ Codec2Client::CreateInputSurface();
if (!inputSurface) {
- return nullptr;
+ if (property_get_int32("debug.stagefright.c2inputsurface", 0) == -1) {
+ sp<IGraphicBufferProducer> gbp;
+ sp<OmxGraphicBufferSource> gbs = new OmxGraphicBufferSource();
+ status_t err = gbs->initCheck();
+ if (err != OK) {
+ ALOGE("Failed to create persistent input surface: error %d", err);
+ return nullptr;
+ }
+ return new PersistentSurface(
+ gbs->getIGraphicBufferProducer(),
+ sp<IGraphicBufferSource>(
+ new Omx2IGraphicBufferSource(gbs)));
+ } else {
+ return nullptr;
+ }
}
- return new android::PersistentSurface(
+ return new PersistentSurface(
inputSurface->getGraphicBufferProducer(),
- static_cast<android::sp<android::hidl::base::V1_0::IBase>>(
+ static_cast<sp<android::hidl::base::V1_0::IBase>>(
inputSurface->getHalInterface()));
}
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 09049b9..8308292 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -896,6 +896,9 @@
input->buffers.reset(new DummyInputBuffers(mName));
} else if (mMetaMode == MODE_ANW) {
input->buffers.reset(new GraphicMetadataInputBuffers(mName));
+ // This is to ensure buffers do not get released prematurely.
+ // TODO: handle this without going into array mode
+ forceArrayMode = true;
} else {
input->buffers.reset(new GraphicInputBuffers(numInputSlots, mName));
}
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 104b10b..5adcd94 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -512,7 +512,8 @@
.limitTo(D::ENCODER & D::VIDEO));
// convert to timestamp base
add(ConfigMapper(KEY_I_FRAME_INTERVAL, C2_PARAMKEY_SYNC_FRAME_INTERVAL, "value")
- .withMappers([](C2Value v) -> C2Value {
+ .limitTo(D::VIDEO & D::ENCODER & D::CONFIG)
+ .withMapper([](C2Value v) -> C2Value {
// convert from i32 to float
int32_t i32Value;
float fpValue;
@@ -522,12 +523,6 @@
return int64_t(c2_min(1000000 * fpValue + 0.5, (double)INT64_MAX));
}
return C2Value();
- }, [](C2Value v) -> C2Value {
- int64_t i64;
- if (v.get(&i64)) {
- return float(i64) / 1000000;
- }
- return C2Value();
}));
// remove when codecs switch to proper coding.gop (add support for calculating gop)
deprecated(ConfigMapper("i-frame-period", "coding.gop", "intra-period")
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 702ad6f..5c8ad56 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -25,6 +25,7 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
#include <nativebase/nativebase.h>
+#include <ui/Fence.h>
#include <C2AllocatorGralloc.h>
#include <C2BlockInternal.h>
@@ -590,7 +591,12 @@
std::shared_ptr<C2GraphicBlock> block = _C2BlockFactory::CreateGraphicBlock(alloc);
meta->pBuffer = 0;
- // TODO: fence
+ // TODO: wrap this in C2Fence so that the component can wait when it
+ // actually starts processing.
+ if (meta->nFenceFd >= 0) {
+ sp<Fence> fence(new Fence(meta->nFenceFd));
+ fence->waitForever(LOG_TAG);
+ }
return C2Buffer::CreateGraphicBuffer(
block->share(C2Rect(buffer->width, buffer->height), C2Fence()));
#else
diff --git a/media/codec2/sfplugin/Omx2IGraphicBufferSource.cpp b/media/codec2/sfplugin/Omx2IGraphicBufferSource.cpp
new file mode 100644
index 0000000..764fa00
--- /dev/null
+++ b/media/codec2/sfplugin/Omx2IGraphicBufferSource.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __LP64__
+#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+#endif
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Omx2IGraphicBufferSource"
+#include <android-base/logging.h>
+
+#include "Omx2IGraphicBufferSource.h"
+
+#include <android/BnOMXBufferSource.h>
+#include <media/OMXBuffer.h>
+#include <media/stagefright/omx/OMXUtils.h>
+
+#include <OMX_Component.h>
+#include <OMX_Index.h>
+#include <OMX_IndexExt.h>
+
+namespace android {
+
+namespace /* unnamed */ {
+
+// OmxGraphicBufferSource -> IOMXBufferSource
+
+struct OmxGbs2IOmxBs : public BnOMXBufferSource {
+ sp<OmxGraphicBufferSource> mBase;
+ OmxGbs2IOmxBs(sp<OmxGraphicBufferSource> const& base) : mBase{base} {}
+ BnStatus onOmxExecuting() override {
+ return mBase->onOmxExecuting();
+ }
+ BnStatus onOmxIdle() override {
+ return mBase->onOmxIdle();
+ }
+ BnStatus onOmxLoaded() override {
+ return mBase->onOmxLoaded();
+ }
+ BnStatus onInputBufferAdded(int32_t bufferId) override {
+ return mBase->onInputBufferAdded(bufferId);
+ }
+ BnStatus onInputBufferEmptied(
+ int32_t bufferId,
+ OMXFenceParcelable const& fenceParcel) override {
+ return mBase->onInputBufferEmptied(bufferId, fenceParcel.get());
+ }
+};
+
+struct OmxNodeWrapper : public IOmxNodeWrapper {
+ sp<IOMXNode> mBase;
+ OmxNodeWrapper(sp<IOMXNode> const& base) : mBase{base} {}
+ status_t emptyBuffer(
+ int32_t bufferId, uint32_t flags,
+ const sp<GraphicBuffer> &buffer,
+ int64_t timestamp, int fenceFd) override {
+ return mBase->emptyBuffer(bufferId, buffer, flags, timestamp, fenceFd);
+ }
+ void dispatchDataSpaceChanged(
+ int32_t dataSpace, int32_t aspects, int32_t pixelFormat) override {
+ omx_message msg{};
+ msg.type = omx_message::EVENT;
+ msg.fenceFd = -1;
+ msg.u.event_data.event = OMX_EventDataSpaceChanged;
+ msg.u.event_data.data1 = dataSpace;
+ msg.u.event_data.data2 = aspects;
+ msg.u.event_data.data3 = pixelFormat;
+ mBase->dispatchMessage(msg);
+ }
+};
+
+} // unnamed namespace
+
+// Omx2IGraphicBufferSource
+Omx2IGraphicBufferSource::Omx2IGraphicBufferSource(
+ sp<OmxGraphicBufferSource> const& base)
+ : mBase{base},
+ mOMXBufferSource{new OmxGbs2IOmxBs(base)} {
+}
+
+BnStatus Omx2IGraphicBufferSource::setSuspend(
+ bool suspend, int64_t timeUs) {
+ return BnStatus::fromStatusT(mBase->setSuspend(suspend, timeUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setRepeatPreviousFrameDelayUs(
+ int64_t repeatAfterUs) {
+ return BnStatus::fromStatusT(mBase->setRepeatPreviousFrameDelayUs(repeatAfterUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setMaxFps(float maxFps) {
+ return BnStatus::fromStatusT(mBase->setMaxFps(maxFps));
+}
+
+BnStatus Omx2IGraphicBufferSource::setTimeLapseConfig(
+ double fps, double captureFps) {
+ return BnStatus::fromStatusT(mBase->setTimeLapseConfig(fps, captureFps));
+}
+
+BnStatus Omx2IGraphicBufferSource::setStartTimeUs(
+ int64_t startTimeUs) {
+ return BnStatus::fromStatusT(mBase->setStartTimeUs(startTimeUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setStopTimeUs(
+ int64_t stopTimeUs) {
+ return BnStatus::fromStatusT(mBase->setStopTimeUs(stopTimeUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::getStopTimeOffsetUs(
+ int64_t *stopTimeOffsetUs) {
+ return BnStatus::fromStatusT(mBase->getStopTimeOffsetUs(stopTimeOffsetUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setColorAspects(
+ int32_t aspects) {
+ return BnStatus::fromStatusT(mBase->setColorAspects(aspects));
+}
+
+BnStatus Omx2IGraphicBufferSource::setTimeOffsetUs(
+ int64_t timeOffsetsUs) {
+ return BnStatus::fromStatusT(mBase->setTimeOffsetUs(timeOffsetsUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::signalEndOfInputStream() {
+ return BnStatus::fromStatusT(mBase->signalEndOfInputStream());
+}
+
+BnStatus Omx2IGraphicBufferSource::configure(
+ const sp<IOMXNode>& omxNode, int32_t dataSpace) {
+ if (omxNode == NULL) {
+ return BnStatus::fromServiceSpecificError(BAD_VALUE);
+ }
+
+ // Do setInputSurface() first, the node will try to enable metadata
+ // mode on input, and does necessary error checking. If this fails,
+ // we can't use this input surface on the node.
+ status_t err = omxNode->setInputSurface(mOMXBufferSource);
+ if (err != NO_ERROR) {
+ ALOGE("Unable to set input surface: %d", err);
+ return BnStatus::fromServiceSpecificError(err);
+ }
+
+ uint32_t consumerUsage;
+ if (omxNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
+ &consumerUsage, sizeof(consumerUsage)) != OK) {
+ consumerUsage = 0;
+ }
+
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = 0; // kPortIndexInput
+
+ err = omxNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
+ if (err != NO_ERROR) {
+ ALOGE("Failed to get port definition: %d", err);
+ return BnStatus::fromServiceSpecificError(UNKNOWN_ERROR);
+ }
+
+ return BnStatus::fromStatusT(mBase->configure(
+ new OmxNodeWrapper(omxNode),
+ dataSpace,
+ def.nBufferCountActual,
+ def.format.video.nFrameWidth,
+ def.format.video.nFrameHeight,
+ consumerUsage));
+}
+
+} // namespace android
+
diff --git a/media/codec2/sfplugin/Omx2IGraphicBufferSource.h b/media/codec2/sfplugin/Omx2IGraphicBufferSource.h
new file mode 100644
index 0000000..20fd1ec
--- /dev/null
+++ b/media/codec2/sfplugin/Omx2IGraphicBufferSource.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OMX_2_IGRAPHICBUFFERSOURCE_H_
+#define OMX_2_IGRAPHICBUFFERSOURCE_H_
+
+#include <android/BnGraphicBufferSource.h>
+#include <media/stagefright/omx/OmxGraphicBufferSource.h>
+
+namespace android {
+
+using BnStatus = ::android::binder::Status;
+
+struct Omx2IGraphicBufferSource : public BnGraphicBufferSource {
+ sp<OmxGraphicBufferSource> mBase;
+ sp<IOMXBufferSource> mOMXBufferSource;
+ Omx2IGraphicBufferSource(sp<OmxGraphicBufferSource> const& base);
+ BnStatus configure(const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
+ BnStatus setSuspend(bool suspend, int64_t timeUs) override;
+ BnStatus setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) override;
+ BnStatus setMaxFps(float maxFps) override;
+ BnStatus setTimeLapseConfig(double fps, double captureFps) override;
+ BnStatus setStartTimeUs(int64_t startTimeUs) override;
+ BnStatus setStopTimeUs(int64_t stopTimeUs) override;
+ BnStatus getStopTimeOffsetUs(int64_t *stopTimeOffsetUs) override;
+ BnStatus setColorAspects(int32_t aspects) override;
+ BnStatus setTimeOffsetUs(int64_t timeOffsetsUs) override;
+ BnStatus signalEndOfInputStream() override;
+};
+
+} // namespace android
+
+#endif // OMX_2_IGRAPHICBUFFERSOURCE_H_
+
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index a6cc45b..366cc87 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -89,7 +89,11 @@
if (mAudioEndpoint.isFreeRunning()) {
//ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
// Update data queue based on the timing model.
- int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+ // Jitter in the DSP can cause late writes to the FIFO.
+ // This might be caused by resampling.
+ // We want to read the FIFO after the latest possible time
+ // that the DSP could have written the data.
+ int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
// TODO refactor, maybe use setRemoteCounter()
mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
}
@@ -139,7 +143,7 @@
// the writeCounter might have just advanced in the background,
// causing us to sleep until a later burst.
int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
- wakeTime = mClockModel.convertPositionToTime(nextPosition);
+ wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
}
break;
default:
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index d26b352..9abdf53 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -19,12 +19,11 @@
#include <log/log.h>
#include <stdint.h>
+#include <algorithm>
#include "utility/AudioClock.h"
#include "IsochronousClockModel.h"
-#define MIN_LATENESS_NANOS (10 * AAUDIO_NANOS_PER_MICROSECOND)
-
using namespace aaudio;
IsochronousClockModel::IsochronousClockModel()
@@ -32,7 +31,7 @@
, mMarkerNanoTime(0)
, mSampleRate(48000)
, mFramesPerBurst(64)
- , mMaxLatenessInNanos(0)
+ , mMaxMeasuredLatenessNanos(0)
, mState(STATE_STOPPED)
{
}
@@ -41,8 +40,7 @@
}
void IsochronousClockModel::setPositionAndTime(int64_t framePosition, int64_t nanoTime) {
- ALOGV("setPositionAndTime(%lld, %lld)",
- (long long) framePosition, (long long) nanoTime);
+ ALOGV("setPositionAndTime, %lld, %lld", (long long) framePosition, (long long) nanoTime);
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime;
}
@@ -54,7 +52,9 @@
}
void IsochronousClockModel::stop(int64_t nanoTime) {
- ALOGV("stop(nanos = %lld)\n", (long long) nanoTime);
+ ALOGD("stop(nanos = %lld) max lateness = %d micros\n",
+ (long long) nanoTime,
+ (int) (mMaxMeasuredLatenessNanos / 1000));
setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
// TODO should we set position?
mState = STATE_STOPPED;
@@ -69,9 +69,10 @@
}
void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nanoTime) {
-// ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
-// (long long)framePosition,
-// (long long)nanoTime);
+ mTimestampCount++;
+// Log position and time in CSV format so we can import it easily into spreadsheets.
+ //ALOGD("%s() CSV, %d, %lld, %lld", __func__,
+ //mTimestampCount, (long long)framePosition, (long long)nanoTime);
int64_t framesDelta = framePosition - mMarkerFramePosition;
int64_t nanosDelta = nanoTime - mMarkerNanoTime;
if (nanosDelta < 1000) {
@@ -110,22 +111,54 @@
// Earlier than expected timestamp.
// This data is probably more accurate, so use it.
// Or we may be drifting due to a fast HW clock.
-// int microsDelta = (int) (nanosDelta / 1000);
-// int expectedMicrosDelta = (int) (expectedNanosDelta / 1000);
-// ALOGD("processTimestamp() - STATE_RUNNING - %7d < %7d so %4d micros EARLY",
-// microsDelta, expectedMicrosDelta, (expectedMicrosDelta - microsDelta));
+ //int microsDelta = (int) (nanosDelta / 1000);
+ //int expectedMicrosDelta = (int) (expectedNanosDelta / 1000);
+ //ALOGD("%s() - STATE_RUNNING - #%d, %4d micros EARLY",
+ //__func__, mTimestampCount, expectedMicrosDelta - microsDelta);
setPositionAndTime(framePosition, nanoTime);
- } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
- // Later than expected timestamp.
-// int microsDelta = (int) (nanosDelta / 1000);
-// int expectedMicrosDeadline = (int) ((expectedNanosDelta + mMaxLatenessInNanos) / 1000);
-// ALOGD("processTimestamp() - STATE_RUNNING - %7d > %7d so %4d micros LATE",
-// microsDelta, expectedMicrosDeadline, (microsDelta - expectedMicrosDeadline));
+ } else if (nanosDelta > (expectedNanosDelta + (2 * mBurstPeriodNanos))) {
+ // In this case we do not update mMaxMeasuredLatenessNanos because it
+ // would force it too high.
+ // mMaxMeasuredLatenessNanos should range from 1 to 2 * mBurstPeriodNanos
+ //int32_t measuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
+ //ALOGD("%s() - STATE_RUNNING - #%d, lateness %d - max %d = %4d micros VERY LATE",
+ //__func__,
+ //mTimestampCount,
+ //measuredLatenessNanos / 1000,
+ //mMaxMeasuredLatenessNanos / 1000,
+ //(measuredLatenessNanos - mMaxMeasuredLatenessNanos) / 1000
+ //);
- // When we are late it may be because of preemption in the kernel or
- // we may be drifting due to a slow HW clock.
- setPositionAndTime(framePosition, nanoTime - mMaxLatenessInNanos);
+ // This typically happens when we are modelling a service instead of a DSP.
+ setPositionAndTime(framePosition, nanoTime - (2 * mBurstPeriodNanos));
+ } else if (nanosDelta > (expectedNanosDelta + mMaxMeasuredLatenessNanos)) {
+ //int32_t previousLatenessNanos = mMaxMeasuredLatenessNanos;
+ mMaxMeasuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
+
+ //ALOGD("%s() - STATE_RUNNING - #%d, newmax %d - oldmax %d = %4d micros LATE",
+ //__func__,
+ //mTimestampCount,
+ //mMaxMeasuredLatenessNanos / 1000,
+ //previousLatenessNanos / 1000,
+ //(mMaxMeasuredLatenessNanos - previousLatenessNanos) / 1000
+ //);
+
+ // When we are late, it may be because of preemption in the kernel,
+ // or timing jitter caused by resampling in the DSP,
+ // or we may be drifting due to a slow HW clock.
+ // We add slight drift value just in case there is actual long term drift
+ // forward caused by a slower clock.
+ // If the clock is faster, then the model will get pushed earlier
+ // by the code in the preceding branch.
+ // The two opposing forces should allow the model to track the real clock
+ // over a long time.
+ int64_t driftingTime = mMarkerNanoTime + expectedNanosDelta + kDriftNanos;
+ setPositionAndTime(framePosition, driftingTime);
+ //ALOGD("%s() - #%d, max lateness = %d micros",
+ //__func__,
+ //mTimestampCount,
+ //(int) (mMaxMeasuredLatenessNanos / 1000));
}
break;
default:
@@ -145,9 +178,12 @@
update();
}
+// Update expected lateness based on sampleRate and framesPerBurst
void IsochronousClockModel::update() {
- int64_t nanosLate = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
- mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
+ mBurstPeriodNanos = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
+ // Timestamps may be late by up to a burst because we are randomly sampling the time period
+ // after the DSP position is actually updated.
+ mMaxMeasuredLatenessNanos = mBurstPeriodNanos;
}
int64_t IsochronousClockModel::convertDeltaPositionToTime(int64_t framesDelta) const {
@@ -190,11 +226,25 @@
return position;
}
+int32_t IsochronousClockModel::getLateTimeOffsetNanos() const {
+ // This will never be < 0 because mMaxMeasuredLatenessNanos starts at
+ // mBurstPeriodNanos and only gets bigger.
+ return (mMaxMeasuredLatenessNanos - mBurstPeriodNanos) + kExtraLatenessNanos;
+}
+
+int64_t IsochronousClockModel::convertPositionToLatestTime(int64_t framePosition) const {
+ return convertPositionToTime(framePosition) + getLateTimeOffsetNanos();
+}
+
+int64_t IsochronousClockModel::convertLatestTimeToPosition(int64_t nanoTime) const {
+ return convertTimeToPosition(nanoTime - getLateTimeOffsetNanos());
+}
+
void IsochronousClockModel::dump() const {
ALOGD("mMarkerFramePosition = %lld", (long long) mMarkerFramePosition);
ALOGD("mMarkerNanoTime = %lld", (long long) mMarkerNanoTime);
ALOGD("mSampleRate = %6d", mSampleRate);
ALOGD("mFramesPerBurst = %6d", mFramesPerBurst);
- ALOGD("mMaxLatenessInNanos = %6d", mMaxLatenessInNanos);
+ ALOGD("mMaxMeasuredLatenessNanos = %6d", mMaxMeasuredLatenessNanos);
ALOGD("mState = %6d", mState);
}
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 46ca48e..582bf4e 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -18,6 +18,7 @@
#define ANDROID_AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
#include <stdint.h>
+#include "utility/AudioClock.h"
namespace aaudio {
@@ -79,6 +80,15 @@
int64_t convertPositionToTime(int64_t framePosition) const;
/**
+ * Calculate the latest estimated time that the stream will be at that position.
+ * The more jittery the clock is, the later this will be.
+ *
+ * @param framePosition
+ * @return time in nanoseconds
+ */
+ int64_t convertPositionToLatestTime(int64_t framePosition) const;
+
+ /**
* Calculate an estimated position where the stream will be at the specified time.
*
* @param nanoTime time of interest
@@ -87,6 +97,18 @@
int64_t convertTimeToPosition(int64_t nanoTime) const;
/**
+ * Calculate the corresponding estimated position based on the specified time being
+ * the latest possible time.
+ *
+ * For the same nanoTime, this may return an earlier position than
+ * convertTimeToPosition().
+ *
+ * @param nanoTime
+ * @return position in frames
+ */
+ int64_t convertLatestTimeToPosition(int64_t nanoTime) const;
+
+ /**
* @param framesDelta difference in frames
* @return duration in nanoseconds
*/
@@ -101,6 +123,9 @@
void dump() const;
private:
+
+ int32_t getLateTimeOffsetNanos() const;
+
enum clock_model_state_t {
STATE_STOPPED,
STATE_STARTING,
@@ -108,13 +133,23 @@
STATE_RUNNING
};
+ // Amount of time to drift forward when we get a late timestamp.
+ // This value was calculated to allow tracking of a clock with 50 ppm error.
+ static constexpr int32_t kDriftNanos = 10 * 1000;
+ // TODO review value of kExtraLatenessNanos
+ static constexpr int32_t kExtraLatenessNanos = 100 * 1000;
+
int64_t mMarkerFramePosition;
int64_t mMarkerNanoTime;
int32_t mSampleRate;
int32_t mFramesPerBurst;
- int32_t mMaxLatenessInNanos;
+ int32_t mBurstPeriodNanos;
+ // Includes mBurstPeriodNanos because we sample randomly over time.
+ int32_t mMaxMeasuredLatenessNanos;
clock_model_state_t mState;
+ int32_t mTimestampCount = 0;
+
void update();
};
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index cb8d375..2bf0802 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -77,10 +77,13 @@
if (t != 0) {
if (enabled) {
if (t->exitPending()) {
+ mCaptureLock.unlock();
if (t->requestExitAndWait() == WOULD_BLOCK) {
+ mCaptureLock.lock();
ALOGE("Visualizer::enable() called from thread");
return INVALID_OPERATION;
}
+ mCaptureLock.lock();
}
}
t->mLock.lock();
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index e260cae..7d03d98 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -72,7 +72,6 @@
cfi: true,
},
- compile_multilib: "32",
}
cc_library_shared {
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index baa4fc7..830f752 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -113,12 +113,12 @@
void
AImageReader::FrameListener::onFrameAvailable(const BufferItem& /*item*/) {
- Mutex::Autolock _l(mLock);
sp<AImageReader> reader = mReader.promote();
if (reader == nullptr) {
ALOGW("A frame is available after AImageReader closed!");
return; // reader has been closed
}
+ Mutex::Autolock _l(mLock);
if (mListener.onImageAvailable == nullptr) {
return; // No callback registered
}
@@ -143,12 +143,12 @@
void
AImageReader::BufferRemovedListener::onBufferFreed(const wp<GraphicBuffer>& graphicBuffer) {
- Mutex::Autolock _l(mLock);
sp<AImageReader> reader = mReader.promote();
if (reader == nullptr) {
ALOGW("A frame is available after AImageReader closed!");
return; // reader has been closed
}
+ Mutex::Autolock _l(mLock);
if (mListener.onBufferRemoved == nullptr) {
return; // No callback registered
}
diff --git a/media/ndk/NdkImageReaderPriv.h b/media/ndk/NdkImageReaderPriv.h
index e328cb1..19bd704 100644
--- a/media/ndk/NdkImageReaderPriv.h
+++ b/media/ndk/NdkImageReaderPriv.h
@@ -134,7 +134,7 @@
private:
AImageReader_ImageListener mListener = {nullptr, nullptr};
- wp<AImageReader> mReader;
+ const wp<AImageReader> mReader;
Mutex mLock;
};
sp<FrameListener> mFrameListener;
@@ -149,7 +149,7 @@
private:
AImageReader_BufferRemovedListener mListener = {nullptr, nullptr};
- wp<AImageReader> mReader;
+ const wp<AImageReader> mReader;
Mutex mLock;
};
sp<BufferRemovedListener> mBufferRemovedListener;
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 96a8337..1f9b725 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -333,9 +333,10 @@
if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
moduleDevice->setEncodedFormat(encodedFormat);
}
- moduleDevice->setAddress(devAddress);
if (allowToCreate) {
moduleDevice->attach(hwModule);
+ moduleDevice->setAddress(devAddress);
+ moduleDevice->setName(String8(name));
}
return moduleDevice;
}
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 09638d0..98f9328 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -2058,6 +2058,13 @@
return OK;
}
bool CameraProviderManager::ProviderInfo::DeviceInfo3::isAPI1Compatible() const {
+ // Do not advertise NIR cameras to API1 camera app.
+ camera_metadata_ro_entry cfa = mCameraCharacteristics.find(
+ ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT);
+ if (cfa.count == 1 && cfa.data.u8[0] == ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_NIR) {
+ return false;
+ }
+
bool isBackwardCompatible = false;
camera_metadata_ro_entry_t caps = mCameraCharacteristics.find(
ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index a1a4958..4227a3b 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -4372,7 +4372,7 @@
int overrideFormat = mapToFrameworkFormat(src.v3_2.overrideFormat);
android_dataspace overrideDataSpace = mapToFrameworkDataspace(src.overrideDataSpace);
- if (dst->format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ if (dstStream->getOriginalFormat() != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
dstStream->setFormatOverride(false);
dstStream->setDataSpaceOverride(false);
if (dst->format != overrideFormat) {
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 2df084b..fd9b4b0 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -68,7 +68,7 @@
mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX),
mBufferLimitLatency(kBufferLimitLatencyBinSize),
mFormatOverridden(false),
- mOriginalFormat(-1),
+ mOriginalFormat(format),
mDataSpaceOverridden(false),
mOriginalDataSpace(HAL_DATASPACE_UNKNOWN),
mPhysicalCameraId(physicalCameraId),
@@ -125,9 +125,6 @@
void Camera3Stream::setFormatOverride(bool formatOverridden) {
mFormatOverridden = formatOverridden;
- if (formatOverridden && mOriginalFormat == -1) {
- mOriginalFormat = camera3_stream::format;
- }
}
bool Camera3Stream::isFormatOverridden() const {
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 533318f..67afd0f 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -582,9 +582,9 @@
static const int32_t kBufferLimitLatencyBinSize = 33; //in ms
CameraLatencyHistogram mBufferLimitLatency;
- //Keep track of original format in case it gets overridden
+ //Keep track of original format when the stream is created in case it gets overridden
bool mFormatOverridden;
- int mOriginalFormat;
+ const int mOriginalFormat;
//Keep track of original dataSpace in case it gets overridden
bool mDataSpaceOverridden;