Merge changes from topic "orphan_effect_chain" into main
* changes:
Put orphan effect chain back if orphan effect creation failed
AudioEffect: prevent adding effect for unknown session on first io
Return non-zero channel mask for orphan effects
AudioFlinger: Keep track of music effect thread
diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
index 16ea15e..6e55a16 100644
--- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
+++ b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
@@ -64,10 +64,6 @@
"libfwdlock-decoder",
],
- whole_static_libs: [
- "libc++fs",
- ],
-
local_include_dirs: ["include"],
relative_install_path: "drm",
diff --git a/media/audioaidlconversion/include/media/AidlConversionEffect.h b/media/audioaidlconversion/include/media/AidlConversionEffect.h
index b03d06b..e51bf8b 100644
--- a/media/audioaidlconversion/include/media/AidlConversionEffect.h
+++ b/media/audioaidlconversion/include/media/AidlConversionEffect.h
@@ -72,9 +72,6 @@
MAKE_EXTENSION_PARAMETER_ID(_effect, _tag##Tag, _extId); \
aidl::android::hardware::audio::effect::Parameter _aidlParam; \
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(_id, &_aidlParam))); \
- aidl::android::hardware::audio::effect::VendorExtension _ext = \
- VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD( \
- _aidlParam, _effect, _tag, _effect::vendor, VendorExtension)); \
return VALUE_OR_RETURN_STATUS( \
aidl::android::aidl2legacy_Parameter_EffectParameterWriter(_aidlParam, _param)); \
}
diff --git a/media/codec2/hal/client/GraphicsTracker.cpp b/media/codec2/hal/client/GraphicsTracker.cpp
index f80809a..95f5a6e 100644
--- a/media/codec2/hal/client/GraphicsTracker.cpp
+++ b/media/codec2/hal/client/GraphicsTracker.cpp
@@ -673,6 +673,15 @@
ALOGW("BQ might not be ready for dequeueBuffer()");
return C2_BLOCKING;
}
+ bool cacheExpired = false;
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ cacheExpired = (mBufferCache.get() != cache.get());
+ }
+ if (cacheExpired) {
+ ALOGW("a new BQ is configured. dequeueBuffer() error %d", (int)status);
+ return C2_BLOCKING;
+ }
ALOGE("BQ in inconsistent status. dequeueBuffer() error %d", (int)status);
return C2_CORRUPTED;
}
diff --git a/media/codec2/hal/common/MultiAccessUnitHelper.cpp b/media/codec2/hal/common/MultiAccessUnitHelper.cpp
index b1fa82f..55bf7a9 100644
--- a/media/codec2/hal/common/MultiAccessUnitHelper.cpp
+++ b/media/codec2/hal/common/MultiAccessUnitHelper.cpp
@@ -497,9 +497,10 @@
// This is to take care of the last bytes and to decide to send with
// FLAG_INCOMPLETE or not.
if ((frame->mWview
- && (frame->mWview->offset() > frame->mLargeFrameTuning.thresholdSize))
+ && (frame->mWview->offset() >= frame->mLargeFrameTuning.thresholdSize))
|| frame->mComponentFrameIds.empty()) {
if (frame->mLargeWork) {
+ frame->mLargeWork->result = C2_OK;
finalizeWork(*frame);
addOutWork(frame->mLargeWork);
frame->reset();
@@ -558,12 +559,15 @@
c2_status_t ret = C2_OK;
if (frame.mLargeWork == nullptr) {
frame.mLargeWork.reset(new C2Work);
+ frame.mLargeWork->result = C2_OK;
+ frame.mLargeWork->input.flags = (C2FrameData::flags_t)0;
frame.mLargeWork->input.ordinal = frame.inOrdinal;
frame.mLargeWork->input.ordinal.frameIndex = frame.inOrdinal.frameIndex;
}
if (allocateWorket) {
if (frame.mLargeWork->worklets.size() == 0) {
frame.mLargeWork->worklets.emplace_back(new C2Worklet);
+ frame.mLargeWork->worklets.back()->output.flags = (C2FrameData::flags_t)0;
}
}
if (allocateBuffer) {
@@ -611,6 +615,9 @@
if (c2ret != C2_OK) {
return c2ret;
}
+ uint32_t flags = work->input.flags;
+ flags |= frame.mLargeWork->input.flags;
+ frame.mLargeWork->input.flags = (C2FrameData::flags_t)flags;
C2FrameData& outputFramedata = frame.mLargeWork->worklets.front()->output;
if (!(*worklet)->output.configUpdate.empty()) {
for (auto& configUpdate : (*worklet)->output.configUpdate) {
@@ -678,6 +685,9 @@
}
}
allocateWork(frame, true, true);
+ uint32_t flags = work->input.flags;
+ flags |= frame.mLargeWork->input.flags;
+ frame.mLargeWork->input.flags = (C2FrameData::flags_t)flags;
C2ReadView rView = blocks.front().map().get();
if (rView.error()) {
LOG(ERROR) << "Buffer read view error";
@@ -744,7 +754,8 @@
}
LOG(DEBUG) << "Finalizing work with input Idx "
<< frame.mLargeWork->input.ordinal.frameIndex.peekull()
- << " timestamp " << timeStampUs;
+ << " timestamp " << timeStampUs
+ << " inFlags " << inFlags;
uint32_t finalFlags = 0;
if ((!forceComplete)
&& (frame.mLargeWork->result == C2_OK)
diff --git a/media/codec2/vndk/include/C2BqBufferPriv.h b/media/codec2/vndk/include/C2BqBufferPriv.h
index 1e8dd40..806932c 100644
--- a/media/codec2/vndk/include/C2BqBufferPriv.h
+++ b/media/codec2/vndk/include/C2BqBufferPriv.h
@@ -44,7 +44,77 @@
*
* If a specific HGBP is configured, the HGBP acts as an allocator for creating graphic blocks.
*
- * TODO: add more ducumentation(graphic block life-cycle, waitable object and workaounds)
+ *
+ * HGBP/IGBP and the BlockPool
+ *
+ * GraphicBuffer(s) from a BufferQueue (IGBP/IGBC) are identified by slot id.
+ * A created GraphicBuffer occupies a slot (so the GraphicBuffer has a slot id).
+ * A GraphicBuffer is produced, consumed and recycled based on its slot id
+ * w.r.t. the BufferQueue.
+ *
+ * HGBP::dequeueBuffer() returns a slot id whose slot has an available GraphicBuffer.
+ * If necessary, HGBP allocates a new GraphicBuffer for the slot and indicates
+ * via a return flag that a new buffer was allocated.
+ * To retrieve the GraphicBuffer, HGBP::requestBuffer() must be called with the
+ * slot id. To save HGBP remote calls, the blockpool caches the
+ * allocated GraphicBuffer(s) along with the slot information.
+ *
+ * The blockpool provides a C2GraphicBlock upon \fetchGraphicBlock().
+ * The C2GraphicBlock has a native handle, which is extracted from a GraphicBuffer
+ * and then cloned so that its life-cycle is independent of the GraphicBuffer. The
+ * GraphicBuffer is allocated by HGBP::dequeueBuffer() and retrieved by
+ * HGBP::requestBuffer() if an HGBP is configured.
+ *
+ *
+ * Life-cycle of C2GraphicBlock
+ *
+ * The decoder HAL writes a decoded frame into a C2GraphicBlock. Upon
+ * completion, the component sends the block to the client in the remote process
+ * (i.e. to MediaCodec). The remote process renders the frame into the output
+ * surface via IGBP::queueBuffer() (note: this call is not hidlized).
+ *
+ * If the decoder HAL destroys the C2GraphicBlock without transferring it to the
+ * client, the destroy request goes to the BlockPool. The BlockPool then
+ * returns the associated GraphicBuffer's slot to HGBP for recycling
+ * via HGBP::cancelBuffer().
+ *
+ *
+ * Clearing the Cache (GraphicBuffer)
+ *
+ * When the output surface is switched to a new surface, the GraphicBuffers from
+ * the old surface are either migrated or cleared.
+ *
+ * The GraphicBuffer(s) still in use are migrated to a new surface during
+ * configuration via HGBP::attachBuffer(). The GraphicBuffer(s) not in use are
+ * cleared from the cache inside the BlockPool.
+ *
+ * When the surface is switched to a null surface, all the
+ * GraphicBuffers in the cache are cleared.
+ *
+ *
+ * Workaround w.r.t. b/322731059 (Deferring cleaning the cache)
+ *
+ * Some vendor devices have issues with graphic buffer lifecycle management,
+ * where the graphic buffers get released even though the cloned native handles
+ * in the remote process have not been closed yet. This led to rare crashes
+ * on those devices when the cache was cleared early.
+ *
+ * We work around the crash by deferring the clearing of the cache.
+ * The workaround is not enabled by default; it can be enabled via a
+ * system property as shown below:
+ *
+ * 'debug.codec2.bqpool_dealloc_after_stop' = 1
+ *
+ * Setting the debug flag will cause \::setDeferDeallocationAfterStop() to be
+ * called after the blockpool creation, which enables the deferring.
+ *
+ * Once the deferring is enabled, clearing the GraphicBuffers is delayed until
+ * 1) \::clearDeferredBlocks() is called,
+ *    typically after the HAL processes a stop() request, or
+ * 2) a new ::fetchGraphicBlock() is called.
+ *
+ * Since the deferring delays the deallocation, it results in more
+ * memory consumption during that brief period.
*/
class C2BufferQueueBlockPool : public C2BlockPool {
public:
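Reviewer note: a minimal sketch of how the deferred-deallocation workaround documented above could be enabled at blockpool creation time. The helper name `maybeEnableDeferredDealloc` and the use of libbase's `GetBoolProperty` are illustrative assumptions; only the property name and the `setDeferDeallocationAfterStop()` / `clearDeferredBlocks()` calls come from the documentation in this patch.

```cpp
#include <memory>

#include <android-base/properties.h>

#include <C2BqBufferPriv.h>

// Hypothetical helper: enable the b/322731059 workaround for a freshly created
// C2BufferQueueBlockPool when the debug property is set. Cached GraphicBuffers
// are then kept until clearDeferredBlocks() runs (typically after the HAL
// handles stop()) or until the next fetchGraphicBlock().
static void maybeEnableDeferredDealloc(const std::shared_ptr<C2BufferQueueBlockPool>& pool) {
    const bool defer = android::base::GetBoolProperty(
            "debug.codec2.bqpool_dealloc_after_stop", /*default_value=*/false);
    if (defer && pool) {
        pool->setDeferDeallocationAfterStop();
    }
}
```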
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 9d9b574..f6ddc3e 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -574,7 +574,7 @@
* For privacy, the following usages can not be recorded: AAUDIO_VOICE_COMMUNICATION*,
* AAUDIO_USAGE_NOTIFICATION*, AAUDIO_USAGE_ASSISTANCE* and {@link #AAUDIO_USAGE_ASSISTANT}.
*
- * On <a href="/reference/android/os/Build.VERSION_CODES#Q">Build.VERSION_CODES</a>,
+ * On <a href="/reference/android/os/Build.VERSION_CODES#Q">Q</a>,
* this means only {@link #AAUDIO_USAGE_MEDIA} and {@link #AAUDIO_USAGE_GAME} may be captured.
*
* See <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_ALL">
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index 3e51575..67fc668 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -23,13 +23,6 @@
using namespace aaudio;
-// TODO These defines should be moved to a central place in audio.
-#define SAMPLES_PER_FRAME_MIN 1
-#define SAMPLES_PER_FRAME_MAX FCC_LIMIT
-#define SAMPLE_RATE_HZ_MIN 8000
-// HDMI supports up to 32 channels at 1536000 Hz.
-#define SAMPLE_RATE_HZ_MAX 1600000
-
void AAudioStreamParameters::copyFrom(const AAudioStreamParameters &other) {
mSamplesPerFrame = other.mSamplesPerFrame;
mSampleRate = other.mSampleRate;
@@ -73,8 +66,8 @@
}
aaudio_result_t AAudioStreamParameters::validate() const {
- if (mSamplesPerFrame != AAUDIO_UNSPECIFIED
- && (mSamplesPerFrame < SAMPLES_PER_FRAME_MIN || mSamplesPerFrame > SAMPLES_PER_FRAME_MAX)) {
+ if (mSamplesPerFrame != AAUDIO_UNSPECIFIED && (mSamplesPerFrame < CHANNEL_COUNT_MIN_AAUDIO ||
+ mSamplesPerFrame > CHANNEL_COUNT_MAX_AAUDIO)) {
ALOGD("channelCount out of range = %d", mSamplesPerFrame);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
@@ -105,8 +98,8 @@
aaudio_result_t result = isFormatValid (mAudioFormat);
if (result != AAUDIO_OK) return result;
- if (mSampleRate != AAUDIO_UNSPECIFIED
- && (mSampleRate < SAMPLE_RATE_HZ_MIN || mSampleRate > SAMPLE_RATE_HZ_MAX)) {
+ if (mSampleRate != AAUDIO_UNSPECIFIED &&
+ (mSampleRate < SAMPLE_RATE_HZ_MIN_AAUDIO || mSampleRate > SAMPLE_RATE_HZ_MAX_IEC610937)) {
ALOGD("sampleRate out of range = %d", mSampleRate);
return AAUDIO_ERROR_INVALID_RATE;
}
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index ac4e2b3..4f32883 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -46,15 +46,6 @@
#define AAUDIO_MMAP_POLICY_DEFAULT AAUDIO_POLICY_NEVER
#define AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT AAUDIO_POLICY_NEVER
-// These values are for a pre-check before we ask the lower level service to open a stream.
-// So they are just outside the maximum conceivable range of value,
-// on the edge of being ridiculous.
-// TODO These defines should be moved to a central place in audio.
-#define SAMPLES_PER_FRAME_MIN 1
-#define SAMPLES_PER_FRAME_MAX FCC_LIMIT
-#define SAMPLE_RATE_HZ_MIN 8000
-// HDMI supports up to 32 channels at 1536000 Hz.
-#define SAMPLE_RATE_HZ_MAX 1600000
#define FRAMES_PER_DATA_CALLBACK_MIN 1
#define FRAMES_PER_DATA_CALLBACK_MAX (1024 * 1024)
diff --git a/media/libaudioclient/tests/audio_test_utils.cpp b/media/libaudioclient/tests/audio_test_utils.cpp
index 745c7d1..1599839 100644
--- a/media/libaudioclient/tests/audio_test_utils.cpp
+++ b/media/libaudioclient/tests/audio_test_utils.cpp
@@ -294,6 +294,7 @@
ts.getBestTimestamp(&position, &timeNs, ExtendedTimestamp::TIMEBASE_MONOTONIC, &location) ==
OK) {
// Use audio timestamp.
+ std::lock_guard l(mMutex);
timeUs = timeNs / 1000 -
(position - mNumFramesReceived + mNumFramesLost) * usPerSec / mSampleRate;
} else {
@@ -322,6 +323,7 @@
} else {
numLostBytes = 0;
}
+ std::lock_guard l(mMutex);
const int64_t timestampUs =
((1000000LL * mNumFramesReceived) + (mRecord->getSampleRate() >> 1)) /
mRecord->getSampleRate();
@@ -335,6 +337,7 @@
if (buffer.size() == 0) {
ALOGW("Nothing is available from AudioRecord callback buffer");
} else {
+ std::lock_guard l(mMutex);
const size_t bufferSize = buffer.size();
const int64_t timestampUs =
((1000000LL * mNumFramesReceived) + (mRecord->getSampleRate() >> 1)) /
@@ -359,17 +362,24 @@
void AudioCapture::onOverrun() {
ALOGV("received event overrun");
- mBufferOverrun = true;
}
void AudioCapture::onMarker(uint32_t markerPosition) {
ALOGV("received Callback at position %d", markerPosition);
- mReceivedCbMarkerAtPosition = markerPosition;
+ {
+ std::lock_guard l(mMutex);
+ mReceivedCbMarkerAtPosition = markerPosition;
+ }
+ mMarkerCondition.notify_all();
}
void AudioCapture::onNewPos(uint32_t markerPosition) {
ALOGV("received Callback at position %d", markerPosition);
- mReceivedCbMarkerCount++;
+ {
+ std::lock_guard l(mMutex);
+ mReceivedCbMarkerCount = mReceivedCbMarkerCount.value_or(0) + 1;
+ }
+ mMarkerCondition.notify_all();
}
void AudioCapture::onNewIAudioRecord() {
@@ -387,20 +397,7 @@
mFlags(flags),
mSessionId(sessionId),
mTransferType(transferType),
- mAttributes(attributes) {
- mFrameCount = 0;
- mNotificationFrames = 0;
- mNumFramesToRecord = 0;
- mNumFramesReceived = 0;
- mNumFramesLost = 0;
- mBufferOverrun = false;
- mMarkerPosition = 0;
- mMarkerPeriod = 0;
- mReceivedCbMarkerAtPosition = -1;
- mReceivedCbMarkerCount = 0;
- mState = REC_NO_INIT;
- mStopRecording = false;
-}
+ mAttributes(attributes) {}
AudioCapture::~AudioCapture() {
if (mOutFileFd > 0) close(mOutFileFd);
@@ -531,25 +528,32 @@
const int maxTries = MAX_WAIT_TIME_MS / WAIT_PERIOD_MS;
int counter = 0;
size_t nonContig = 0;
- while (mNumFramesReceived < mNumFramesToRecord) {
+ int64_t numFramesReceived;
+ {
+ std::lock_guard l(mMutex);
+ numFramesReceived = mNumFramesReceived;
+ }
+ while (numFramesReceived < mNumFramesToRecord) {
AudioRecord::Buffer recordBuffer;
recordBuffer.frameCount = mNotificationFrames;
status_t status = mRecord->obtainBuffer(&recordBuffer, 1, &nonContig);
if (OK == status) {
const int64_t timestampUs =
- ((1000000LL * mNumFramesReceived) + (mRecord->getSampleRate() >> 1)) /
+ ((1000000LL * numFramesReceived) + (mRecord->getSampleRate() >> 1)) /
mRecord->getSampleRate();
RawBuffer buff{-1, timestampUs, static_cast<int32_t>(recordBuffer.size())};
memcpy(buff.mData.get(), recordBuffer.data(), recordBuffer.size());
buffer = std::move(buff);
- mNumFramesReceived += recordBuffer.size() / mRecord->frameSize();
+ numFramesReceived += recordBuffer.size() / mRecord->frameSize();
mRecord->releaseBuffer(&recordBuffer);
counter = 0;
} else if (WOULD_BLOCK == status) {
// if not received a buffer for MAX_WAIT_TIME_MS, something has gone wrong
- if (counter == maxTries) return TIMED_OUT;
- counter++;
+ if (counter++ == maxTries) status = TIMED_OUT;
}
+ std::lock_guard l(mMutex);
+ mNumFramesReceived = numFramesReceived;
+ if (TIMED_OUT == status) return status;
}
return OK;
}
@@ -577,7 +581,12 @@
status_t AudioCapture::audioProcess() {
RawBuffer buffer;
status_t status = OK;
- while (mNumFramesReceived < mNumFramesToRecord && status == OK) {
+ int64_t numFramesReceived;
+ {
+ std::lock_guard l(mMutex);
+ numFramesReceived = mNumFramesReceived;
+ }
+ while (numFramesReceived < mNumFramesToRecord && status == OK) {
if (mTransferType == AudioRecord::TRANSFER_CALLBACK)
status = obtainBufferCb(buffer);
else
@@ -586,10 +595,52 @@
const char* ptr = static_cast<const char*>(static_cast<void*>(buffer.mData.get()));
write(mOutFileFd, ptr, buffer.mCapacity);
}
+ std::lock_guard l(mMutex);
+ numFramesReceived = mNumFramesReceived;
}
return OK;
}
+uint32_t AudioCapture::getMarkerPeriod() const {
+ std::lock_guard l(mMutex);
+ return mMarkerPeriod;
+}
+
+uint32_t AudioCapture::getMarkerPosition() const {
+ std::lock_guard l(mMutex);
+ return mMarkerPosition;
+}
+
+void AudioCapture::setMarkerPeriod(uint32_t markerPeriod) {
+ std::lock_guard l(mMutex);
+ mMarkerPeriod = markerPeriod;
+}
+
+void AudioCapture::setMarkerPosition(uint32_t markerPosition) {
+ std::lock_guard l(mMutex);
+ mMarkerPosition = markerPosition;
+}
+
+uint32_t AudioCapture::waitAndGetReceivedCbMarkerAtPosition() const {
+ std::unique_lock lock(mMutex);
+ android::base::ScopedLockAssertion lock_assertion(mMutex);
+ mMarkerCondition.wait_for(lock, std::chrono::seconds(3), [this]() {
+ android::base::ScopedLockAssertion lock_assertion(mMutex);
+ return mReceivedCbMarkerAtPosition.has_value();
+ });
+ return mReceivedCbMarkerAtPosition.value_or(~0);
+}
+
+uint32_t AudioCapture::waitAndGetReceivedCbMarkerCount() const {
+ std::unique_lock lock(mMutex);
+ android::base::ScopedLockAssertion lock_assertion(mMutex);
+ mMarkerCondition.wait_for(lock, std::chrono::seconds(3), [this]() {
+ android::base::ScopedLockAssertion lock_assertion(mMutex);
+ return mReceivedCbMarkerCount.has_value();
+ });
+ return mReceivedCbMarkerCount.value_or(0);
+}
+
status_t listAudioPorts(std::vector<audio_port_v7>& portsVec) {
int attempts = 5;
status_t status;
diff --git a/media/libaudioclient/tests/audio_test_utils.h b/media/libaudioclient/tests/audio_test_utils.h
index 40c3365..022ecf3 100644
--- a/media/libaudioclient/tests/audio_test_utils.h
+++ b/media/libaudioclient/tests/audio_test_utils.h
@@ -146,8 +146,8 @@
~AudioCapture();
size_t onMoreData(const AudioRecord::Buffer& buffer) override EXCLUDES(mMutex);
void onOverrun() override;
- void onMarker(uint32_t markerPosition) override;
- void onNewPos(uint32_t newPos) override;
+ void onMarker(uint32_t markerPosition) override EXCLUDES(mMutex);
+ void onNewPos(uint32_t newPos) override EXCLUDES(mMutex);
void onNewIAudioRecord() override;
status_t create();
status_t setRecordDuration(float durationInSec);
@@ -157,20 +157,19 @@
status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
audio_session_t triggerSession = AUDIO_SESSION_NONE);
status_t obtainBufferCb(RawBuffer& buffer) EXCLUDES(mMutex);
- status_t obtainBuffer(RawBuffer& buffer);
- status_t audioProcess();
+ status_t obtainBuffer(RawBuffer& buffer) EXCLUDES(mMutex);
+ status_t audioProcess() EXCLUDES(mMutex);
status_t stop() EXCLUDES(mMutex);
+ uint32_t getMarkerPeriod() const EXCLUDES(mMutex);
+ uint32_t getMarkerPosition() const EXCLUDES(mMutex);
+ void setMarkerPeriod(uint32_t markerPeriod) EXCLUDES(mMutex);
+ void setMarkerPosition(uint32_t markerPosition) EXCLUDES(mMutex);
+ uint32_t waitAndGetReceivedCbMarkerAtPosition() const EXCLUDES(mMutex);
+ uint32_t waitAndGetReceivedCbMarkerCount() const EXCLUDES(mMutex);
- uint32_t mFrameCount;
- uint32_t mNotificationFrames;
- int64_t mNumFramesToRecord;
- int64_t mNumFramesReceived;
- int64_t mNumFramesLost;
- uint32_t mMarkerPosition;
- uint32_t mMarkerPeriod;
- uint32_t mReceivedCbMarkerAtPosition;
- uint32_t mReceivedCbMarkerCount;
- bool mBufferOverrun;
+ uint32_t mFrameCount = 0;
+ uint32_t mNotificationFrames = 0;
+ int64_t mNumFramesToRecord = 0;
enum State {
REC_NO_INIT,
@@ -191,14 +190,23 @@
size_t mMaxBytesPerCallback = 2048;
sp<AudioRecord> mRecord;
- State mState;
- bool mStopRecording GUARDED_BY(mMutex);
+ State mState = REC_NO_INIT;
+ bool mStopRecording GUARDED_BY(mMutex) = false;
std::string mFileName;
int mOutFileFd = -1;
mutable std::mutex mMutex;
std::condition_variable mCondition;
std::deque<RawBuffer> mBuffersReceived GUARDED_BY(mMutex);
+
+ mutable std::condition_variable mMarkerCondition;
+ uint32_t mMarkerPeriod GUARDED_BY(mMutex) = 0;
+ uint32_t mMarkerPosition GUARDED_BY(mMutex) = 0;
+ std::optional<uint32_t> mReceivedCbMarkerCount GUARDED_BY(mMutex);
+ std::optional<uint32_t> mReceivedCbMarkerAtPosition GUARDED_BY(mMutex);
+
+ int64_t mNumFramesReceived GUARDED_BY(mMutex) = 0;
+ int64_t mNumFramesLost GUARDED_BY(mMutex) = 0;
};
#endif // AUDIO_TEST_UTILS_H_
diff --git a/media/libaudioclient/tests/audiorecord_tests.cpp b/media/libaudioclient/tests/audiorecord_tests.cpp
index d122508..68b0e7b 100644
--- a/media/libaudioclient/tests/audiorecord_tests.cpp
+++ b/media/libaudioclient/tests/audiorecord_tests.cpp
@@ -83,7 +83,10 @@
}
void TearDown() override {
- if (mAC) ASSERT_EQ(OK, mAC->stop());
+ if (mAC) {
+ ASSERT_EQ(OK, mAC->stop());
+ mAC.clear();
+ }
}
};
@@ -149,33 +152,33 @@
}
TEST_F(AudioRecordTest, TestGetSetMarker) {
- mAC->mMarkerPosition = (mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1);
- EXPECT_EQ(OK, mAC->getAudioRecordHandle()->setMarkerPosition(mAC->mMarkerPosition))
+ mAC->setMarkerPosition((mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1));
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->setMarkerPosition(mAC->getMarkerPosition()))
<< "setMarkerPosition() failed";
uint32_t marker;
EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getMarkerPosition(&marker))
<< "getMarkerPosition() failed";
EXPECT_EQ(OK, mAC->start()) << "start recording failed";
EXPECT_EQ(OK, mAC->audioProcess()) << "audioProcess failed";
- // TODO(b/348658586): Properly synchronize callback updates with the test thread.
- EXPECT_EQ(marker, mAC->mMarkerPosition)
+ EXPECT_EQ(marker, mAC->getMarkerPosition())
<< "configured marker and received marker are different";
- EXPECT_EQ(mAC->mReceivedCbMarkerAtPosition, mAC->mMarkerPosition)
+ EXPECT_EQ(mAC->waitAndGetReceivedCbMarkerAtPosition(), mAC->getMarkerPosition())
<< "configured marker and received cb marker are different";
}
TEST_F(AudioRecordTest, TestGetSetMarkerPeriodical) {
- mAC->mMarkerPeriod = (mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1);
- EXPECT_EQ(OK, mAC->getAudioRecordHandle()->setPositionUpdatePeriod(mAC->mMarkerPeriod))
+ mAC->setMarkerPeriod((mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1));
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->setPositionUpdatePeriod(mAC->getMarkerPeriod()))
<< "setPositionUpdatePeriod() failed";
uint32_t marker;
EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getPositionUpdatePeriod(&marker))
<< "getPositionUpdatePeriod() failed";
EXPECT_EQ(OK, mAC->start()) << "start recording failed";
EXPECT_EQ(OK, mAC->audioProcess()) << "audioProcess failed";
- // TODO(b/348658586): Properly synchronize callback updates with the test thread.
- EXPECT_EQ(marker, mAC->mMarkerPeriod) << "configured marker and received marker are different";
- EXPECT_EQ(mAC->mReceivedCbMarkerCount, mAC->mNumFramesToRecord / mAC->mMarkerPeriod)
+ EXPECT_EQ(marker, mAC->getMarkerPeriod())
+ << "configured marker and received marker are different";
+ EXPECT_EQ(mAC->waitAndGetReceivedCbMarkerCount(),
+ mAC->mNumFramesToRecord / mAC->getMarkerPeriod())
<< "configured marker and received cb marker are different";
}
@@ -202,12 +205,12 @@
EXPECT_EQ(mSessionId, mAC->getAudioRecordHandle()->getSessionId());
if (mTransferType != AudioRecord::TRANSFER_CALLBACK) {
uint32_t marker;
- mAC->mMarkerPosition = (mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1);
+ mAC->setMarkerPosition((mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1));
EXPECT_EQ(INVALID_OPERATION,
- mAC->getAudioRecordHandle()->setMarkerPosition(mAC->mMarkerPosition));
+ mAC->getAudioRecordHandle()->setMarkerPosition(mAC->getMarkerPosition()));
EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getMarkerPosition(&marker));
EXPECT_EQ(INVALID_OPERATION,
- mAC->getAudioRecordHandle()->setPositionUpdatePeriod(mAC->mMarkerPosition));
+ mAC->getAudioRecordHandle()->setPositionUpdatePeriod(mAC->getMarkerPosition()));
EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getPositionUpdatePeriod(&marker));
}
EXPECT_EQ(OK, mAC->start()) << "start recording failed";
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index e5323a6..d4b4b6c 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -278,10 +278,6 @@
"android.media.codec-aconfig-cc",
],
- include_dirs: [
- "system/libhidl/transport/token/1.0/utils/include",
- ],
-
export_include_dirs: [
"include",
],
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index c45c5c3..d5d1a09 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -18,8 +18,6 @@
#define LOG_TAG "MediaCodecInfo"
#include <utils/Log.h>
-#include <media/IOMX.h>
-
#include <media/MediaCodecInfo.h>
#include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 3ab32f0..736dd8a 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -1334,10 +1334,10 @@
// cause out-of-memory due to large input buffer size. And audio recording
// probably doesn't make sense in the scenario, since the slow-down factor
// is probably huge (eg. mSampleRate=48K, mCaptureFps=240, mFrameRate=1).
- const static int32_t SAMPLE_RATE_HZ_MAX = 192000;
+ const static int32_t kSampleRateHzMax = 192000;
sourceSampleRate =
(mSampleRate * mCaptureFps + mFrameRate / 2) / mFrameRate;
- if (sourceSampleRate < mSampleRate || sourceSampleRate > SAMPLE_RATE_HZ_MAX) {
+ if (sourceSampleRate < mSampleRate || sourceSampleRate > kSampleRateHzMax) {
ALOGE("source sample rate out of range! "
"(mSampleRate %d, mCaptureFps %.2f, mFrameRate %d",
mSampleRate, mCaptureFps, mFrameRate);
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index e918b5e..15188b0 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -72,6 +72,9 @@
static const int64_t kMaxMetadataSize = 0x4000000LL; // 64MB max per-frame metadata size
static const int64_t kMaxCttsOffsetTimeUs = 30 * 60 * 1000000LL; // 30 minutes
static const size_t kESDSScratchBufferSize = 10; // kMaxAtomSize in Mpeg4Extractor 64MB
+// Allow up to 100 milliseconds, which is safely above the maximum delay observed in manual
+// testing between posting from setNextFd() and handling it.
+static const int64_t kFdCondWaitTimeoutNs = 100000000;
static const char kMetaKey_Version[] = "com.android.version";
static const char kMetaKey_Manufacturer[] = "com.android.manufacturer";
@@ -1262,9 +1265,13 @@
return OK;
}
+ // Wait for the signal only if the new file is not available.
if (mNextFd == -1) {
- ALOGW("No FileDescriptor for next recording");
- return INVALID_OPERATION;
+ status_t res = mFdCond.waitRelative(mLock, kFdCondWaitTimeoutNs);
+ if (res != OK) {
+ ALOGW("No FileDescriptor for next recording");
+ return INVALID_OPERATION;
+ }
}
mSwitchPending = true;
@@ -2433,6 +2440,7 @@
return INVALID_OPERATION;
}
mNextFd = dup(fd);
+ mFdCond.signal();
return OK;
}
@@ -4886,8 +4894,15 @@
int32_t mediaTime = (mFirstSampleStartOffsetUs * mTimeScale + 5E5) / 1E6;
int32_t firstSampleOffsetTicks =
(mFirstSampleStartOffsetUs * mvhdTimeScale + 5E5) / 1E6;
- // samples before 0 don't count in for duration, hence subtract firstSampleOffsetTicks.
- addOneElstTableEntry(tkhdDurationTicks - firstSampleOffsetTicks, mediaTime, 1, 0);
+ if (tkhdDurationTicks >= firstSampleOffsetTicks) {
+ // samples before 0 don't count in for duration, hence subtract
+ // firstSampleOffsetTicks.
+ addOneElstTableEntry(tkhdDurationTicks - firstSampleOffsetTicks, mediaTime, 1, 0);
+ } else {
+ ALOGW("The track header duration %" PRId64
+ " is smaller than the first sample offset %" PRId64,
+ mTrackDurationUs, mFirstSampleStartOffsetUs);
+ }
} else {
// Track starting at zero.
ALOGV("No edit list entry required for this track");
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 054a4b8..ee75129 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -144,6 +144,7 @@
std::mutex mFallocMutex;
bool mPreAllocFirstTime; // Pre-allocate space for file and track headers only once per file.
uint64_t mPrevAllTracksTotalMetaDataSizeEstimate;
+ Condition mFdCond;
List<Track *> mTracks;
diff --git a/media/ndk/include/media/NdkMediaDataSource.h b/media/ndk/include/media/NdkMediaDataSource.h
index 197e202..def142c 100644
--- a/media/ndk/include/media/NdkMediaDataSource.h
+++ b/media/ndk/include/media/NdkMediaDataSource.h
@@ -49,16 +49,16 @@
/*
* AMediaDataSource's callbacks will be invoked on an implementation-defined thread
* or thread pool. No guarantees are provided about which thread(s) will be used for
- * callbacks. For example, |close| can be invoked from a different thread than the
- * thread invoking |readAt|. As such, the Implementations of AMediaDataSource callbacks
+ * callbacks. For example, `close` can be invoked from a different thread than the
+ * thread invoking `readAt`. As such, implementations of AMediaDataSource callbacks
* must be threadsafe.
*/
/**
- * Called to request data from the given |offset|.
+ * Called to request data from the given `offset`.
*
- * Implementations should should write up to |size| bytes into
- * |buffer|, and return the number of bytes written.
+ * Implementations should write up to `size` bytes into
+ * `buffer`, and return the number of bytes written.
*
* Return 0 if size is zero (thus no bytes are read).
*
@@ -78,9 +78,9 @@
* Called to close the data source, unblock reads, and release associated
* resources.
*
- * The NDK media framework guarantees that after the first |close| is
+ * The NDK media framework guarantees that after the first `close` is
* called, no future callbacks will be invoked on the data source except
- * for |close| itself.
+ * for `close` itself.
*
* Closing a data source allows readAt calls that were blocked waiting
* for I/O data to return promptly.
@@ -101,7 +101,7 @@
/**
* Called to get an estimate of the number of bytes that can be read from this data source
- * starting at |offset| without blocking for I/O.
+ * starting at `offset` without blocking for I/O.
*
* Return -1 when such an estimate is not possible.
*/
@@ -111,10 +111,10 @@
* Create new media data source. Returns NULL if memory allocation
* for the new data source object fails.
*
- * Set the |uri| from which the data source will read,
+ * Set the `uri` from which the data source will read,
* plus additional http headers when initiating the request.
*
- * Headers will contain corresponding items from |key_values|
+ * Headers will contain corresponding items from `key_values`
* in the following fashion:
*
* key_values[0]:key_values[1]
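As an illustration of the callback contract described above, here is a minimal sketch of a thread-safe AMediaDataSource backed by a file descriptor. The `FdSource` struct, callback names and locking scheme are illustrative assumptions; the `AMediaDataSource_*` setters are the existing NDK entry points declared in this header.

```cpp
#include <media/NdkMediaDataSource.h>

#include <mutex>
#include <unistd.h>

// Illustrative user data: a pread()-based source guarded by a mutex so that
// readAt/getSize/close may be invoked concurrently from different threads.
struct FdSource {
    std::mutex lock;
    int fd = -1;
    int64_t size = 0;
};

static ssize_t readAtCb(void *userdata, off64_t offset, void *buffer, size_t size) {
    auto *src = static_cast<FdSource *>(userdata);
    std::lock_guard<std::mutex> guard(src->lock);
    if (src->fd < 0 || size == 0) return 0;   // zero-size reads return 0 bytes
    ssize_t n = pread64(src->fd, buffer, size, offset);
    return n < 0 ? -1 : n;                    // a negative value signals error / end of stream
}

static ssize_t getSizeCb(void *userdata) {
    auto *src = static_cast<FdSource *>(userdata);
    std::lock_guard<std::mutex> guard(src->lock);
    return src->fd < 0 ? -1 : src->size;      // -1 when the size is unknown
}

static void closeCb(void *userdata) {
    auto *src = static_cast<FdSource *>(userdata);
    std::lock_guard<std::mutex> guard(src->lock);
    if (src->fd >= 0) { close(src->fd); src->fd = -1; }
}

AMediaDataSource *makeFdDataSource(FdSource *src) {
    AMediaDataSource *ds = AMediaDataSource_new();
    AMediaDataSource_setUserdata(ds, src);
    AMediaDataSource_setReadAt(ds, readAtCb);
    AMediaDataSource_setGetSize(ds, getSizeCb);
    AMediaDataSource_setClose(ds, closeCb);
    return ds;
}
```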
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 505f3b3..d2a4cb4 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -702,7 +702,7 @@
uint32_t mRightVolume; // previous volume on right channel
uint32_t mNewLeftVolume; // new volume on left channel
uint32_t mNewRightVolume; // new volume on right channel
- product_strategy_t mStrategy; // strategy for this effect chain
+ product_strategy_t mStrategy = PRODUCT_STRATEGY_NONE; // strategy for this effect chain
// mSuspendedEffects lists all effects currently suspended in the chain.
// Use effect type UUID timelow field as key. There is no real risk of identical
// timeLow fields among effect type UUIDs.
@@ -797,7 +797,7 @@
void checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect __unused,
bool enabled __unused, bool threadLocked __unused) override {}
void resetVolume_l() override REQUIRES(audio_utils::EffectChain_Mutex) {}
- product_strategy_t strategy() const override { return static_cast<product_strategy_t>(0); }
+ product_strategy_t strategy() const override { return PRODUCT_STRATEGY_NONE; }
int32_t activeTrackCnt() const override { return 0; }
void onEffectEnable(const sp<IAfEffectBase>& effect __unused) override;
void onEffectDisable(const sp<IAfEffectBase>& effect __unused) override;
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 4643bd1..cf594c6 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -33,10 +33,6 @@
static const uint32_t SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY = 5000;
-// For mixed output and inputs, the policy will use max mixer sampling rates.
-// Do not limit sampling rate otherwise
-#define SAMPLE_RATE_HZ_MAX 192000
-
// Used when a client opens a capture stream, without specifying a desired sample rate.
#define SAMPLE_RATE_HZ_DEFAULT 48000
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index c502fc2..7002e63 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -282,6 +282,11 @@
const AudioProfileVector& getSupportedProfiles() { return mSupportedProfiles; }
+ /**
+ * @brief Checks whether all devices in the device vector are attached to their HwModule
+ * @return true if all devices in the vector are attached, false otherwise
+ */
+ bool areAllDevicesAttached() const;
// Return a string to describe the DeviceVector. The sensitive information will only be
// added to the string if `includeSensitiveInfo` is true.
std::string toString(bool includeSensitiveInfo = false) const;
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 9f7b8fc..46a04de 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -541,4 +541,14 @@
return filteredDevices;
}
+bool DeviceVector::areAllDevicesAttached() const
+{
+ for (const auto &device : *this) {
+ if (!device->isAttached()) {
+ return false;
+ }
+ }
+ return true;
+}
+
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
index ce8178f..0ee84c1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
@@ -20,6 +20,7 @@
#include "PolicyAudioPort.h"
#include "HwModule.h"
#include <policy.h>
+#include <system/audio.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 5187c36..ce597e2 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -374,6 +374,7 @@
checkLeBroadcastRoutes(wasLeUnicastActive, nullptr, 0);
mpClientInterface->onAudioPortListUpdate();
+ ALOGV("%s() completed for device: %s", __func__, device->toString().c_str());
return NO_ERROR;
} // end if is output device
@@ -389,6 +390,8 @@
return INVALID_OPERATION;
}
+ ALOGV("%s() connecting device %s", __func__, device->toString().c_str());
+
if (mAvailableInputDevices.add(device) < 0) {
return NO_MEMORY;
}
@@ -461,6 +464,7 @@
}
mpClientInterface->onAudioPortListUpdate();
+ ALOGV("%s() completed for device: %s", __func__, device->toString().c_str());
return NO_ERROR;
} // end if is input device
@@ -1516,7 +1520,7 @@
(config->channel_mask == desc->getChannelMask()) &&
(session == desc->mDirectClientSession)) {
desc->mDirectOpenCount++;
- ALOGV("%s reusing direct output %d for session %d", __func__,
+ ALOGI("%s reusing direct output %d for session %d", __func__,
mOutputs.keyAt(i), session);
*output = mOutputs.keyAt(i);
return NO_ERROR;
@@ -1526,17 +1530,23 @@
if (!profile->canOpenNewIo()) {
if (!com::android::media::audioserver::direct_track_reprioritization()) {
+ ALOGW("%s profile %s can't open new output maxOpenCount reached", __func__,
+ profile->getName().c_str());
return NAME_NOT_FOUND;
} else if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) != 0) {
// MMAP gracefully handles lack of an exclusive track resource by mixing
// above the audio framework. For AAudio to know that the limit is reached,
// return an error.
+ ALOGW("%s profile %s can't open new mmap output maxOpenCount reached", __func__,
+ profile->getName().c_str());
return NAME_NOT_FOUND;
} else {
// Close outputs on this profile, if available, to free resources for this request
for (int i = 0; i < mOutputs.size() && !profile->canOpenNewIo(); i++) {
const auto desc = mOutputs.valueAt(i);
if (desc->mProfile == profile) {
+ ALOGV("%s closeOutput %d to prioritize session %d on profile %s", __func__,
+ desc->mIoHandle, session, profile->getName().c_str());
closeOutput(desc->mIoHandle);
}
}
@@ -1545,6 +1555,8 @@
// Unable to close streams to find free resources for this request
if (!profile->canOpenNewIo()) {
+ ALOGW("%s profile %s can't open new output maxOpenCount reached", __func__,
+ profile->getName().c_str());
return NAME_NOT_FOUND;
}
@@ -3276,8 +3288,8 @@
ALOGW("%s: no group for stream %s, bailing out", __func__, toString(stream).c_str());
return NO_ERROR;
}
- ALOGV("%s: stream %s attributes=%s", __func__,
- toString(stream).c_str(), toString(attributes).c_str());
+ ALOGV("%s: stream %s attributes=%s, index %d , device 0x%X", __func__,
+ toString(stream).c_str(), toString(attributes).c_str(), index, device);
return setVolumeIndexForAttributes(attributes, index, device);
}
@@ -3455,8 +3467,8 @@
bool hasVoice = hasVoiceStream(volumeCurves.getStreamTypes());
if (((index < volumeCurves.getVolumeIndexMin()) && !(hasVoice && index == 0)) ||
(index > volumeCurves.getVolumeIndexMax())) {
- ALOGD("%s: wrong index %d min=%d max=%d", __FUNCTION__, index,
- volumeCurves.getVolumeIndexMin(), volumeCurves.getVolumeIndexMax());
+ ALOGE("%s: wrong index %d min=%d max=%d, device 0x%X", __FUNCTION__, index,
+ volumeCurves.getVolumeIndexMin(), volumeCurves.getVolumeIndexMax(), device);
return BAD_VALUE;
}
if (!audio_is_output_device(device)) {
@@ -6386,6 +6398,14 @@
if (!mConfig->getOutputDevices().contains(supportedDevice)) {
continue;
}
+
+ if (outProfile->isMmap() && !outProfile->hasDynamicAudioProfile()
+ && availProfileDevices.areAllDevicesAttached()) {
+ ALOGV("%s skip opening output for mmap profile %s", __func__,
+ outProfile->getTagName().c_str());
+ continue;
+ }
+
sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
mpClientInterface);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
@@ -6445,6 +6465,14 @@
__func__, inProfile->getTagName().c_str());
continue;
}
+
+ if (inProfile->isMmap() && !inProfile->hasDynamicAudioProfile()
+ && availProfileDevices.areAllDevicesAttached()) {
+ ALOGV("%s skip opening input for mmap profile %s", __func__,
+ inProfile->getTagName().c_str());
+ continue;
+ }
+
sp<AudioInputDescriptor> inputDesc =
new AudioInputDescriptor(inProfile, mpClientInterface);
@@ -6452,12 +6480,12 @@
status_t status = inputDesc->open(nullptr,
availProfileDevices.itemAt(0),
AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE,
+ (audio_input_flags_t) inProfile->getFlags(),
&input);
if (status != NO_ERROR) {
- ALOGW("Cannot open input stream for device %s on hw module %s",
- availProfileDevices.toString().c_str(),
- hwModule->getName());
+ ALOGW("%s: Cannot open input stream for device %s for profile %s on hw module %s",
+ __func__, availProfileDevices.toString().c_str(),
+ inProfile->getTagName().c_str(), hwModule->getName());
continue;
}
for (const auto &device : availProfileDevices) {
@@ -6565,8 +6593,8 @@
sp<IOProfile> profile = hwModule->getOutputProfiles()[j];
if (profile->supportsDevice(device)) {
profiles.add(profile);
- ALOGV("checkOutputsForDevice(): adding profile %zu from module %s",
- j, hwModule->getName());
+ ALOGV("%s(): adding profile %s from module %s",
+ __func__, profile->getTagName().c_str(), hwModule->getName());
}
}
}
@@ -6599,7 +6627,11 @@
if (j != outputs.size()) {
continue;
}
-
+ if (profile->isMmap() && !profile->hasDynamicAudioProfile()) {
+ ALOGV("%s skip opening output for mmap profile %s",
+ __func__, profile->getTagName().c_str());
+ continue;
+ }
if (!profile->canOpenNewIo()) {
ALOGW("Max Output number %u already opened for this profile %s",
profile->maxOpenCount, profile->getTagName().c_str());
@@ -6660,9 +6692,8 @@
if (!profile->supportsDevice(device)) {
continue;
}
- ALOGV("checkOutputsForDevice(): "
- "clearing direct output profile %zu on module %s",
- j, hwModule->getName());
+ ALOGV("%s(): clearing direct output profile %s on module %s",
+ __func__, profile->getTagName().c_str(), hwModule->getName());
profile->clearAudioProfiles();
if (!profile->hasDynamicAudioProfile()) {
continue;
@@ -6717,8 +6748,8 @@
if (profile->supportsDevice(device)) {
profiles.add(profile);
- ALOGV("checkInputsForDevice(): adding profile %zu from module %s",
- profile_index, hwModule->getName());
+ ALOGV("%s : adding profile %s from module %s", __func__,
+ profile->getTagName().c_str(), hwModule->getName());
}
}
}
@@ -6750,15 +6781,22 @@
continue;
}
+ if (profile->isMmap() && !profile->hasDynamicAudioProfile()) {
+ ALOGV("%s skip opening input for mmap profile %s",
+ __func__, profile->getTagName().c_str());
+ continue;
+ }
if (!profile->canOpenNewIo()) {
- ALOGW("Max Input number %u already opened for this profile %s",
- profile->maxOpenCount, profile->getTagName().c_str());
+ ALOGW("%s Max Input number %u already opened for this profile %s",
+ __func__, profile->maxOpenCount, profile->getTagName().c_str());
continue;
}
desc = new AudioInputDescriptor(profile, mpClientInterface);
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- status = desc->open(nullptr, device, AUDIO_SOURCE_MIC, AUDIO_INPUT_FLAG_NONE, &input);
+ ALOGV("%s opening input for profile %s", __func__, profile->getTagName().c_str());
+ status = desc->open(nullptr, device, AUDIO_SOURCE_MIC,
+ (audio_input_flags_t) profile->getFlags(), &input);
if (status == NO_ERROR) {
const String8& address = String8(device->address().c_str());
@@ -6769,7 +6807,8 @@
}
updateAudioProfiles(device, input, profile);
if (!profile->hasValidAudioProfile()) {
- ALOGW("checkInputsForDevice() direct input missing param");
+ ALOGW("%s direct input missing param for profile %s", __func__,
+ profile->getTagName().c_str());
desc->close();
input = AUDIO_IO_HANDLE_NONE;
}
@@ -6780,18 +6819,20 @@
} // endif input != 0
if (input == AUDIO_IO_HANDLE_NONE) {
- ALOGW("%s could not open input for device %s", __func__,
- device->toString().c_str());
+ ALOGW("%s could not open input for device %s on profile %s", __func__,
+ device->toString().c_str(), profile->getTagName().c_str());
profiles.removeAt(profile_index);
profile_index--;
} else {
if (audio_device_is_digital(device->type())) {
device->importAudioPortAndPickAudioProfile(profile);
}
- ALOGV("checkInputsForDevice(): adding input %d", input);
+ ALOGV("%s: adding input %d for profile %s", __func__,
+ input, profile->getTagName().c_str());
if (checkCloseInput(desc)) {
- ALOGV("%s closing input %d", __func__, input);
+ ALOGV("%s: closing input %d for profile %s", __func__,
+ input, profile->getTagName().c_str());
closeInput(input);
}
}
@@ -6810,8 +6851,8 @@
profile_index++) {
sp<IOProfile> profile = hwModule->getInputProfiles()[profile_index];
if (profile->supportsDevice(device)) {
- ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %s",
- profile_index, hwModule->getName());
+ ALOGV("%s: clearing direct input profile %s on module %s", __func__,
+ profile->getTagName().c_str(), hwModule->getName());
profile->clearAudioProfiles();
}
}
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index 68e9ad4..fe99241 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -1583,7 +1583,7 @@
// The chrome plane could be either Cb first, or Cr first. Take the
// smaller address.
uint8_t *src = std::min(yuvBuffer.dataCb, yuvBuffer.dataCr);
- MediaImage2::PlaneIndex dstPlane = codecUvOffsetDiff > 0 ? MediaImage2::U : MediaImage2::V;
+ MediaImage2::PlaneIndex dstPlane = codecUPlaneFirst ? MediaImage2::U : MediaImage2::V;
for (auto row = top/2; row < (top+height)/2; row++) {
uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[dstPlane].mOffset +
imageInfo->mPlane[dstPlane].mRowInc * (row - top/2);
diff --git a/services/camera/libcameraservice/fuzzer/Android.bp b/services/camera/libcameraservice/fuzzer/Android.bp
index 7760f6a..667ba02 100644
--- a/services/camera/libcameraservice/fuzzer/Android.bp
+++ b/services/camera/libcameraservice/fuzzer/Android.bp
@@ -26,7 +26,18 @@
cc_defaults {
name: "libcameraservice_fuzz_defaults",
fuzz_config: {
- componentid: 41727
+ cc: [
+ "android-camera-fwk-eng@google.com",
+ ],
+ componentid: 41727,
+ hotlists: [
+ "4593311",
+ ],
+ description: "The fuzzer targets the APIs of libcameraservice",
+ vector: "local_no_privileges_required",
+ service_privilege: "privileged",
+ users: "multi_user",
+ fuzzed_code_usage: "shipped",
},
}
@@ -37,9 +48,9 @@
"DistortionMapperFuzzer.cpp",
],
shared_libs: [
- "libcameraservice",
+ "camera_platform_flags_c_lib",
"libcamera_client",
- "camera_platform_flags_c_lib"
+ "libcameraservice",
],
}
@@ -50,8 +61,8 @@
"DepthProcessorFuzzer.cpp",
],
shared_libs: [
+ "camera_platform_flags_c_lib",
"libcameraservice",
- "camera_platform_flags_c_lib"
],
corpus: ["corpus/*.jpg"],
}
diff --git a/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp b/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp
index 650ca91..5c5e177 100644
--- a/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp
+++ b/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp
@@ -14,49 +14,92 @@
* limitations under the License.
*/
-#include <array>
-#include <vector>
+#include "common/DepthPhotoProcessor.h"
+
+#include <random>
#include <fuzzer/FuzzedDataProvider.h>
-#include "common/DepthPhotoProcessor.h"
-
using namespace android;
using namespace android::camera3;
-static const size_t kTestBufferWidth = 640;
-static const size_t kTestBufferHeight = 480;
-static const size_t kTestBufferDepthSize (kTestBufferWidth * kTestBufferHeight);
+static const float kMinRatio = 0.1f;
+static const float kMaxRatio = 0.9f;
-void generateDepth16Buffer(const uint8_t* data, size_t size, std::array<uint16_t, kTestBufferDepthSize> *depth16Buffer /*out*/) {
- FuzzedDataProvider dataProvider(data, size);
- for (size_t i = 0; i < depth16Buffer->size(); i++) {
- (*depth16Buffer)[i] = dataProvider.ConsumeIntegral<uint16_t>();
+static const uint8_t kTotalDepthJpegBufferCount = 3;
+static const uint8_t kIntrinsicCalibrationSize = 5;
+static const uint8_t kLensDistortionSize = 5;
+
+static const DepthPhotoOrientation kDepthPhotoOrientations[] = {
+ DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_180_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES};
+
+void generateDepth16Buffer(std::vector<uint16_t>* depth16Buffer /*out*/, size_t length,
+ FuzzedDataProvider& fdp) {
+ std::default_random_engine gen(fdp.ConsumeIntegral<uint8_t>());
+ std::uniform_int_distribution uniDist(0, UINT16_MAX - 1);
+ for (size_t i = 0; i < length; ++i) {
+ (*depth16Buffer)[i] = uniDist(gen);
}
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ FuzzedDataProvider fdp(data, size);
+
DepthPhotoInputFrame inputFrame;
+
+ /**
+ * Consume 80% of the data to set mMainJpegBuffer. This ensures that we don't
+ * completely exhaust the data and can use the remaining 20% for fuzzing the APIs.
+ */
+ std::vector<uint8_t> buffer = fdp.ConsumeBytes<uint8_t>((size * 80) / 100);
+ inputFrame.mMainJpegBuffer = reinterpret_cast<const char*>(buffer.data());
+
+ /**
+ * Calculate height and width based on buffer size and a ratio within [0.1, 0.9].
+ * The ratio adjusts the dimensions while maintaining a relationship to the total buffer size.
+ */
+ const float ratio = fdp.ConsumeFloatingPointInRange<float>(kMinRatio, kMaxRatio);
+ const size_t height = std::sqrt(buffer.size()) * ratio;
+ const size_t width = std::sqrt(buffer.size()) / ratio;
+
+ inputFrame.mMainJpegHeight = height;
+ inputFrame.mMainJpegWidth = width;
+ inputFrame.mMainJpegSize = buffer.size();
// Worst case both depth and confidence maps have the same size as the main color image.
- inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+ inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * kTotalDepthJpegBufferCount;
+
+ std::vector<uint16_t> depth16Buffer(height * width);
+ generateDepth16Buffer(&depth16Buffer, height * width, fdp);
+ inputFrame.mDepthMapBuffer = depth16Buffer.data();
+ inputFrame.mDepthMapHeight = height;
+ inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = width;
+
+ inputFrame.mIsLogical = fdp.ConsumeBool();
+
+ inputFrame.mOrientation = fdp.PickValueInArray<DepthPhotoOrientation>(kDepthPhotoOrientations);
+
+ if (fdp.ConsumeBool()) {
+ for (uint8_t i = 0; i < kIntrinsicCalibrationSize; ++i) {
+ inputFrame.mIntrinsicCalibration[i] = fdp.ConsumeFloatingPoint<float>();
+ }
+ inputFrame.mIsIntrinsicCalibrationValid = 1;
+ }
+
+ if (fdp.ConsumeBool()) {
+ for (uint8_t i = 0; i < kLensDistortionSize; ++i) {
+ inputFrame.mLensDistortion[i] = fdp.ConsumeFloatingPoint<float>();
+ }
+ inputFrame.mIsLensDistortionValid = 1;
+ }
std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
size_t actualDepthPhotoSize = 0;
- std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
- generateDepth16Buffer(data, size, &depth16Buffer);
+ processDepthPhotoFrame(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize);
- inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (data);
- inputFrame.mMainJpegSize = size;
- inputFrame.mDepthMapBuffer = depth16Buffer.data();
- inputFrame.mDepthMapStride = kTestBufferWidth;
- inputFrame.mDepthMapWidth = kTestBufferWidth;
- inputFrame.mDepthMapHeight = kTestBufferHeight;
- processDepthPhotoFrame(
- inputFrame,
- depthPhotoBuffer.size(),
- depthPhotoBuffer.data(),
- &actualDepthPhotoSize);
-
- return 0;
+ return 0;
}
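A quick sanity check of the dimension math used in the fuzzer above (illustrative numbers, not part of the change): because height = sqrt(N) * ratio and width = sqrt(N) / ratio, the product height * width tracks the JPEG buffer size N for any ratio in [0.1, 0.9], while the ratio itself only controls the aspect ratio.

```cpp
#include <cassert>
#include <cmath>
#include <cstddef>

int main() {
    const std::size_t bufferSize = 10000;  // e.g. 80% of a 12500-byte fuzzer input
    const float ratio = 0.5f;              // any value in [kMinRatio, kMaxRatio]
    const std::size_t height = std::sqrt(bufferSize) * ratio;  // 100 * 0.5 = 50
    const std::size_t width  = std::sqrt(bufferSize) / ratio;  // 100 / 0.5 = 200
    assert(height * width == bufferSize);  // exact for these round numbers; approximate in general
    return 0;
}
```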
diff --git a/services/mediametrics/include/mediametricsservice/TimedAction.h b/services/mediametrics/include/mediametricsservice/TimedAction.h
index 8b53ded..8901ced 100644
--- a/services/mediametrics/include/mediametricsservice/TimedAction.h
+++ b/services/mediametrics/include/mediametricsservice/TimedAction.h
@@ -81,9 +81,8 @@
void threadLoop() NO_THREAD_SAFETY_ANALYSIS { // thread safety doesn't cover unique_lock
std::unique_lock l(mLock);
while (!mQuit) {
- auto sleepUntilTime = std::chrono::time_point<TimerClock>::max();
if (!mMap.empty()) {
- sleepUntilTime = mMap.begin()->first;
+ auto sleepUntilTime = mMap.begin()->first;
const auto now = TimerClock::now();
if (sleepUntilTime <= now) {
auto node = mMap.extract(mMap.begin()); // removes from mMap.
@@ -96,8 +95,17 @@
// of REALTIME specification, use kWakeupInterval to ensure minimum
// granularity if suspended.
sleepUntilTime = std::min(sleepUntilTime, now + kWakeupInterval);
+ mCondition.wait_until(l, sleepUntilTime);
+ } else {
+ // As TimerClock is system_clock (which is not monotonic), libcxx's
+ // implementation of condition_variable::wait_until(l, std::chrono::time_point)
+ // recalculates the 'until' time into the wait duration and then goes back to the
+ // absolute timestamp when calling pthread_cond_timedwait(); this back-and-forth
+ // calculation sometimes loses the 'max' value because enough time passes in
+ // between, and instead passes an incorrect timestamp into the syscall, causing
+ // a crash. Mitigate this by explicitly calling the non-timed wait here.
+ mCondition.wait(l);
}
- mCondition.wait_until(l, sleepUntilTime);
}
}
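For context on the change above, a self-contained sketch of the adopted wait pattern (a standalone example with illustrative names, not the mediametrics code): use a timed wait only when a real deadline exists, and fall back to an untimed wait when the schedule is empty so that `time_point::max()` never reaches `wait_until` on a non-monotonic clock.

```cpp
#include <chrono>
#include <condition_variable>
#include <map>
#include <mutex>

using TimerClock = std::chrono::system_clock;   // non-monotonic, matching TimedAction

struct Scheduler {
    std::mutex lock;
    std::condition_variable cond;
    std::map<TimerClock::time_point, int> map;  // deadline -> action id
    bool quit = false;

    void threadLoop() {
        std::unique_lock l(lock);
        while (!quit) {
            if (!map.empty()) {
                // A concrete deadline exists, so a timed wait is safe and bounded.
                cond.wait_until(l, map.begin()->first);
            } else {
                // No deadline: avoid wait_until(time_point::max()) on system_clock,
                // which libcxx converts to a duration and back and can hand an
                // invalid timestamp to pthread_cond_timedwait(). A plain wait()
                // sidesteps that.
                cond.wait(l);
            }
            // ...dispatch expired entries from 'map' here...
        }
    }
};
```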