Merge "restrict binder transactions to audioserver"
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
index c290aec..7b0f341 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
@@ -430,7 +430,15 @@
}
- if(bChange){
+ // During operating mode transition, there is a race condition where the mode
+ // is still LVEQNB_ON, but the effect is considered disabled in the upper layers.
+ // modeChange handles this special race condition.
+ const int /* bool */ modeChange = pParams->OperatingMode != OperatingModeSave
+ || (OperatingModeSave == LVEQNB_ON
+ && pInstance->bInOperatingModeTransition
+ && LVC_Mixer_GetTarget(&pInstance->BypassMixer.MixerStream[0]) == 0);
+
+ if (bChange || modeChange) {
/*
* If the sample rate has changed clear the history
@@ -462,8 +470,7 @@
LVEQNB_SetCoefficients(pInstance); /* Instance pointer */
}
- if(pParams->OperatingMode != OperatingModeSave)
- {
+ if (modeChange) {
if(pParams->OperatingMode == LVEQNB_ON)
{
#ifdef BUILD_FLOAT
@@ -479,6 +486,8 @@
else
{
/* Stay on the ON operating mode until the transition is done */
+ // This may introduce a state race condition if the effect is enabled again
+ // while in transition. This is fixed in the modeChange logic.
pInstance->Params.OperatingMode = LVEQNB_ON;
#ifdef BUILD_FLOAT
LVC_Mixer_SetTarget(&pInstance->BypassMixer.MixerStream[0], 0.0f);
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 146e9e8..8ebae11 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -3330,14 +3330,19 @@
//ALOGV("\tEffect_process Not Calling process with %d effects enabled, %d called: Effect %d",
//pContext->pBundledContext->NumberEffectsEnabled,
//pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
- // 2 is for stereo input
+
if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- for (size_t i=0; i < outBuffer->frameCount*2; i++){
- outBuffer->s16[i] =
- clamp16((LVM_INT32)outBuffer->s16[i] + (LVM_INT32)inBuffer->s16[i]);
+ for (size_t i = 0; i < outBuffer->frameCount * FCC_2; ++i){
+#ifdef NATIVE_FLOAT_BUFFER
+ outBuffer->f32[i] += inBuffer->f32[i];
+#else
+ outBuffer->s16[i] = clamp16((LVM_INT32)outBuffer->s16[i] + inBuffer->s16[i]);
+#endif
}
} else if (outBuffer->raw != inBuffer->raw) {
- memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount*sizeof(LVM_INT16)*2);
+ memcpy(outBuffer->raw,
+ inBuffer->raw,
+ outBuffer->frameCount * sizeof(effect_buffer_t) * FCC_2);
}
}
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 1fe5f60..8db00f0 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -2951,212 +2951,215 @@
mGotStartKeyFrame = true;
}
////////////////////////////////////////////////////////////////////////////////
- if (mStszTableEntries->count() == 0) {
- mFirstSampleTimeRealUs = systemTime() / 1000;
- mStartTimestampUs = timestampUs;
- mOwner->setStartTimestampUs(mStartTimestampUs);
- previousPausedDurationUs = mStartTimestampUs;
- }
- if (mResumed) {
- int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
- if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
- if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- previousPausedDurationUs += pausedDurationUs - lastDurationUs;
- mResumed = false;
- }
- TimestampDebugHelperEntry timestampDebugEntry;
- timestampUs -= previousPausedDurationUs;
- timestampDebugEntry.pts = timestampUs;
- if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- if (mIsVideo) {
- /*
- * Composition time: timestampUs
- * Decoding time: decodingTimeUs
- * Composition time offset = composition time - decoding time
- */
- int64_t decodingTimeUs;
- CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
- decodingTimeUs -= previousPausedDurationUs;
-
- // ensure non-negative, monotonic decoding time
- if (mLastDecodingTimeUs < 0) {
- decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
- } else {
- // increase decoding time by at least the larger vaule of 1 tick and
- // 0.1 milliseconds. This needs to take into account the possible
- // delta adjustment in DurationTicks in below.
- decodingTimeUs = std::max(mLastDecodingTimeUs +
- std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
- }
-
- mLastDecodingTimeUs = decodingTimeUs;
- timestampDebugEntry.dts = decodingTimeUs;
- timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
- // Insert the timestamp into the mTimestampDebugHelper
- if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
- mTimestampDebugHelper.pop_front();
- }
- mTimestampDebugHelper.push_back(timestampDebugEntry);
-
- cttsOffsetTimeUs =
- timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
- if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- timestampUs = decodingTimeUs;
- ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
- timestampUs, cttsOffsetTimeUs);
-
- // Update ctts box table if necessary
- currCttsOffsetTimeTicks =
- (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
- if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
+ if (!mIsHeic) {
if (mStszTableEntries->count() == 0) {
- // Force the first ctts table entry to have one single entry
- // so that we can do adjustment for the initial track start
- // time offset easily in writeCttsBox().
- lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
- addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
- cttsSampleCount = 0; // No sample in ctts box is pending
- } else {
- if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
- addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
- lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
- cttsSampleCount = 1; // One sample in ctts box is pending
+ mFirstSampleTimeRealUs = systemTime() / 1000;
+ mStartTimestampUs = timestampUs;
+ mOwner->setStartTimestampUs(mStartTimestampUs);
+ previousPausedDurationUs = mStartTimestampUs;
+ }
+
+ if (mResumed) {
+ int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
+ if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
+ if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ previousPausedDurationUs += pausedDurationUs - lastDurationUs;
+ mResumed = false;
+ }
+ TimestampDebugHelperEntry timestampDebugEntry;
+ timestampUs -= previousPausedDurationUs;
+ timestampDebugEntry.pts = timestampUs;
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ if (mIsVideo) {
+ /*
+ * Composition time: timestampUs
+ * Decoding time: decodingTimeUs
+ * Composition time offset = composition time - decoding time
+ */
+ int64_t decodingTimeUs;
+ CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
+ decodingTimeUs -= previousPausedDurationUs;
+
+ // ensure non-negative, monotonic decoding time
+ if (mLastDecodingTimeUs < 0) {
+ decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
} else {
- ++cttsSampleCount;
+                    // increase decoding time by at least the larger value of 1 tick and
+ // 0.1 milliseconds. This needs to take into account the possible
+                    // delta adjustment in DurationTicks below.
+ decodingTimeUs = std::max(mLastDecodingTimeUs +
+ std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
}
- }
- // Update ctts time offset range
- if (mStszTableEntries->count() == 0) {
- mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
- mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
- } else {
- if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
- mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
- } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+ mLastDecodingTimeUs = decodingTimeUs;
+ timestampDebugEntry.dts = decodingTimeUs;
+ timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
+ // Insert the timestamp into the mTimestampDebugHelper
+ if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
+ mTimestampDebugHelper.pop_front();
+ }
+ mTimestampDebugHelper.push_back(timestampDebugEntry);
+
+ cttsOffsetTimeUs =
+ timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
+ if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ timestampUs = decodingTimeUs;
+ ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
+ timestampUs, cttsOffsetTimeUs);
+
+ // Update ctts box table if necessary
+ currCttsOffsetTimeTicks =
+ (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
+ if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ if (mStszTableEntries->count() == 0) {
+ // Force the first ctts table entry to have one single entry
+ // so that we can do adjustment for the initial track start
+ // time offset easily in writeCttsBox().
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
+ cttsSampleCount = 0; // No sample in ctts box is pending
+ } else {
+ if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
+ addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ cttsSampleCount = 1; // One sample in ctts box is pending
+ } else {
+ ++cttsSampleCount;
+ }
+ }
+
+ // Update ctts time offset range
+ if (mStszTableEntries->count() == 0) {
mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
- mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+ mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+ } else {
+ if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
+ mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+ } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+ mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
+ mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+ }
}
}
- }
- if (mOwner->isRealTimeRecording()) {
- if (mIsAudio) {
- updateDriftTime(meta_data);
- }
- }
-
- if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
- trackName, timestampUs, previousPausedDurationUs);
- if (timestampUs > mTrackDurationUs) {
- mTrackDurationUs = timestampUs;
- }
-
- // We need to use the time scale based ticks, rather than the
- // timestamp itself to determine whether we have to use a new
- // stts entry, since we may have rounding errors.
- // The calculation is intended to reduce the accumulated
- // rounding errors.
- currDurationTicks =
- ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
- (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
- if (currDurationTicks < 0ll) {
- ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
- (long long)timestampUs, (long long)lastTimestampUs, trackName);
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- // if the duration is different for this sample, see if it is close enough to the previous
- // duration that we can fudge it and use the same value, to avoid filling the stts table
- // with lots of near-identical entries.
- // "close enough" here means that the current duration needs to be adjusted by less
- // than 0.1 milliseconds
- if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
- int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
- + (mTimeScale / 2)) / mTimeScale;
- if (deltaUs > -100 && deltaUs < 100) {
- // use previous ticks, and adjust timestamp as if it was actually that number
- // of ticks
- currDurationTicks = lastDurationTicks;
- timestampUs += deltaUs;
- }
- }
- mStszTableEntries->add(htonl(sampleSize));
- if (mStszTableEntries->count() > 2) {
-
- // Force the first sample to have its own stts entry so that
- // we can adjust its value later to maintain the A/V sync.
- if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
- addOneSttsTableEntry(sampleCount, lastDurationTicks);
- sampleCount = 1;
- } else {
- ++sampleCount;
+ if (mOwner->isRealTimeRecording()) {
+ if (mIsAudio) {
+ updateDriftTime(meta_data);
+ }
}
- }
- if (mSamplesHaveSameSize) {
- if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
- mSamplesHaveSameSize = false;
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
}
- previousSampleSize = sampleSize;
- }
- ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
- trackName, timestampUs, lastTimestampUs);
- lastDurationUs = timestampUs - lastTimestampUs;
- lastDurationTicks = currDurationTicks;
- lastTimestampUs = timestampUs;
- if (isSync != 0) {
- addOneStssTableEntry(mStszTableEntries->count());
- }
-
- if (mTrackingProgressStatus) {
- if (mPreviousTrackTimeUs <= 0) {
- mPreviousTrackTimeUs = mStartTimestampUs;
+ ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
+ trackName, timestampUs, previousPausedDurationUs);
+ if (timestampUs > mTrackDurationUs) {
+ mTrackDurationUs = timestampUs;
}
- trackProgressStatus(timestampUs);
+
+ // We need to use the time scale based ticks, rather than the
+ // timestamp itself to determine whether we have to use a new
+ // stts entry, since we may have rounding errors.
+ // The calculation is intended to reduce the accumulated
+ // rounding errors.
+ currDurationTicks =
+ ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
+ (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
+ if (currDurationTicks < 0ll) {
+ ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
+ (long long)timestampUs, (long long)lastTimestampUs, trackName);
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ // if the duration is different for this sample, see if it is close enough to the previous
+ // duration that we can fudge it and use the same value, to avoid filling the stts table
+ // with lots of near-identical entries.
+ // "close enough" here means that the current duration needs to be adjusted by less
+ // than 0.1 milliseconds
+ if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
+ int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
+ + (mTimeScale / 2)) / mTimeScale;
+ if (deltaUs > -100 && deltaUs < 100) {
+ // use previous ticks, and adjust timestamp as if it was actually that number
+ // of ticks
+ currDurationTicks = lastDurationTicks;
+ timestampUs += deltaUs;
+ }
+ }
+ mStszTableEntries->add(htonl(sampleSize));
+ if (mStszTableEntries->count() > 2) {
+
+ // Force the first sample to have its own stts entry so that
+ // we can adjust its value later to maintain the A/V sync.
+ if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
+ addOneSttsTableEntry(sampleCount, lastDurationTicks);
+ sampleCount = 1;
+ } else {
+ ++sampleCount;
+ }
+
+ }
+ if (mSamplesHaveSameSize) {
+ if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
+ mSamplesHaveSameSize = false;
+ }
+ previousSampleSize = sampleSize;
+ }
+ ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
+ trackName, timestampUs, lastTimestampUs);
+ lastDurationUs = timestampUs - lastTimestampUs;
+ lastDurationTicks = currDurationTicks;
+ lastTimestampUs = timestampUs;
+
+ if (isSync != 0) {
+ addOneStssTableEntry(mStszTableEntries->count());
+ }
+
+ if (mTrackingProgressStatus) {
+ if (mPreviousTrackTimeUs <= 0) {
+ mPreviousTrackTimeUs = mStartTimestampUs;
+ }
+ trackProgressStatus(timestampUs);
+ }
}
if (!hasMultipleTracks) {
size_t bytesWritten;
@@ -4331,9 +4334,12 @@
}
// patch up the mPrimaryItemId and count items with prop associations
+ uint16_t firstVisibleItemId = 0;
for (size_t index = 0; index < mItems.size(); index++) {
if (mItems[index].isPrimary) {
mPrimaryItemId = mItems[index].itemId;
+ } else if (!firstVisibleItemId && !mItems[index].isHidden) {
+ firstVisibleItemId = mItems[index].itemId;
}
if (!mItems[index].properties.empty()) {
@@ -4342,8 +4348,13 @@
}
if (mPrimaryItemId == 0) {
- ALOGW("didn't find primary, using first item");
- mPrimaryItemId = mItems[0].itemId;
+ if (firstVisibleItemId > 0) {
+ ALOGW("didn't find primary, using first visible item");
+ mPrimaryItemId = firstVisibleItemId;
+ } else {
+ ALOGW("no primary and no visible item, using first item");
+ mPrimaryItemId = mItems[0].itemId;
+ }
}
beginBox("meta");
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 71d625f..bc3e57c 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -898,6 +898,9 @@
}
}
+ if (meta->get() == NULL) {
+ return ERROR_MALFORMED;
+ }
return OK;
}
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index a70005e..f331dbb 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -46,6 +46,36 @@
namespace android {
+namespace {
+// kTimestampFluctuation is an upper bound of timestamp fluctuation from the
+// source that GraphicBufferSource allows. The unit of kTimestampFluctuation is
+// frames. More specifically, GraphicBufferSource will drop a frame if
+//
+//     expectedNewFrameTimestamp - actualNewFrameTimestamp <
+//             (0.5 - kTimestampFluctuation) * expectedTimePeriodBetweenFrames
+//
+// where
+// - expectedNewFrameTimestamp is the calculated ideal timestamp of the new
+// incoming frame
+// - actualNewFrameTimestamp is the timestamp received from the source
+// - expectedTimePeriodBetweenFrames is the ideal difference of the timestamps
+// of two adjacent frames
+//
+// See GraphicBufferSource::calculateCodecTimestamp_l() for more detail about
+// how kTimestampFluctuation is used.
+//
+// kTimestampFluctuation should be non-negative. A higher value causes a smaller
+// chance of dropping frames, but at the same time a higher bound on the
+// difference between the source timestamp and the interpreted (snapped)
+// timestamp.
+//
+// The value of 0.05 means that GraphicBufferSource expects the input timestamps
+// to fluctuate no more than 5% from the regular time period.
+//
+// TODO: Justify the choice of this value, or make it configurable.
+constexpr double kTimestampFluctuation = 0.05;
+}
+
/**
* A copiable object managing a buffer in the buffer cache managed by the producer. This object
* holds a reference to the buffer, and maintains which buffer slot it belongs to (if any), and
@@ -732,14 +762,16 @@
mFrameCount = 0;
} else {
// snap to nearest capture point
- int64_t nFrames = std::llround(
- (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000);
- if (nFrames <= 0) {
+ double nFrames = (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000;
+ if (nFrames < 0.5 - kTimestampFluctuation) {
// skip this frame as it's too close to previous capture
ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
return false;
}
- mFrameCount += nFrames;
+ if (nFrames <= 1.0) {
+ nFrames = 1.0;
+ }
+ mFrameCount += std::llround(nFrames);
mPrevCaptureUs = mBaseCaptureUs + std::llround(
mFrameCount * 1000000 / mCaptureFps);
mPrevFrameUs = mBaseFrameUs + std::llround(
diff --git a/media/mtp/MtpDatabase.h b/media/mtp/MtpDatabase.h
index 2395f4f..f3f9720 100644
--- a/media/mtp/MtpDatabase.h
+++ b/media/mtp/MtpDatabase.h
@@ -45,6 +45,8 @@
MtpObjectFormat format,
bool succeeded) = 0;
+ virtual void doScanDirectory(const char* path) = 0;
+
virtual MtpObjectHandleList* getObjectList(MtpStorageID storageID,
MtpObjectFormat format,
MtpObjectHandle parent) = 0;
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 6080868..bb0414d 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -1148,6 +1148,7 @@
ALOGV("Copying file from %s to %s", (const char*)fromPath, (const char*)path);
if (format == MTP_FORMAT_ASSOCIATION) {
int ret = makeFolder((const char *)path);
+ ret += copyRecursive(fromPath, path);
if (ret) {
result = MTP_RESPONSE_GENERAL_ERROR;
}
@@ -1158,6 +1159,8 @@
}
mDatabase->endSendObject(path, handle, format, result);
+ if (format == MTP_FORMAT_ASSOCIATION)
+ mDatabase->doScanDirectory(path);
mResponse.setParameter(1, handle);
return result;
}
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 11dedbb..6b20bca 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -52,6 +52,7 @@
enum {
kWhatActivityNotify,
+ kWhatAsyncNotify,
kWhatRequestActivityNotifications,
kWhatStopActivityNotifications,
};
@@ -88,6 +89,11 @@
bool mRequestedActivityNotification;
OnCodecEvent mCallback;
void *mCallbackUserData;
+
+ sp<AMessage> mAsyncNotify;
+ mutable Mutex mAsyncCallbackLock;
+ AMediaCodecOnAsyncNotifyCallback mAsyncCallback;
+ void *mAsyncCallbackUserData;
};
CodecHandler::CodecHandler(AMediaCodec *codec) {
@@ -128,6 +134,147 @@
break;
}
+ case kWhatAsyncNotify:
+ {
+ int32_t cbID;
+ if (!msg->findInt32("callbackID", &cbID)) {
+ ALOGE("kWhatAsyncNotify: callbackID is expected.");
+ break;
+ }
+
+ ALOGV("kWhatAsyncNotify: cbID = %d", cbID);
+
+ switch (cbID) {
+ case MediaCodec::CB_INPUT_AVAILABLE:
+ {
+ int32_t index;
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_INPUT_AVAILABLE: index is expected.");
+ break;
+ }
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncInputAvailable != NULL) {
+ mCodec->mAsyncCallback.onAsyncInputAvailable(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ index);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_AVAILABLE:
+ {
+ int32_t index;
+ size_t offset;
+ size_t size;
+ int64_t timeUs;
+ int32_t flags;
+
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: index is expected.");
+ break;
+ }
+ if (!msg->findSize("offset", &offset)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: offset is expected.");
+ break;
+ }
+ if (!msg->findSize("size", &size)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: size is expected.");
+ break;
+ }
+ if (!msg->findInt64("timeUs", &timeUs)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: timeUs is expected.");
+ break;
+ }
+ if (!msg->findInt32("flags", &flags)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: flags is expected.");
+ break;
+ }
+
+ AMediaCodecBufferInfo bufferInfo = {
+ (int32_t)offset,
+ (int32_t)size,
+ timeUs,
+ (uint32_t)flags};
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncOutputAvailable != NULL) {
+ mCodec->mAsyncCallback.onAsyncOutputAvailable(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ index,
+ &bufferInfo);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_FORMAT_CHANGED:
+ {
+ sp<AMessage> format;
+ if (!msg->findMessage("format", &format)) {
+ ALOGE("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
+ break;
+ }
+
+ AMediaFormat *aMediaFormat = AMediaFormat_fromMsg(&format);
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncFormatChanged != NULL) {
+ mCodec->mAsyncCallback.onAsyncFormatChanged(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ aMediaFormat);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_ERROR:
+ {
+ status_t err;
+ int32_t actionCode;
+ AString detail;
+ if (!msg->findInt32("err", &err)) {
+ ALOGE("CB_ERROR: err is expected.");
+ break;
+ }
+ if (!msg->findInt32("action", &actionCode)) {
+ ALOGE("CB_ERROR: action is expected.");
+ break;
+ }
+ msg->findString("detail", &detail);
+ ALOGE("Decoder reported error(0x%x), actionCode(%d), detail(%s)",
+ err, actionCode, detail.c_str());
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncError != NULL) {
+ mCodec->mAsyncCallback.onAsyncError(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ translate_error(err),
+ actionCode,
+ detail.c_str());
+ }
+
+ break;
+ }
+
+ default:
+ {
+ ALOGE("kWhatAsyncNotify: callbackID(%d) is unexpected.", cbID);
+ break;
+ }
+ }
+ break;
+ }
+
case kWhatStopActivityNotifications:
{
sp<AReplyToken> replyID;
@@ -162,7 +309,7 @@
size_t res = mData->mLooper->start(
false, // runOnCallingThread
true, // canCallJava XXX
- PRIORITY_FOREGROUND);
+ PRIORITY_AUDIO);
if (res != OK) {
ALOGE("Failed to start the looper");
AMediaCodec_delete(mData);
@@ -183,6 +330,9 @@
mData->mRequestedActivityNotification = false;
mData->mCallback = NULL;
+ mData->mAsyncCallback = {};
+ mData->mAsyncCallbackUserData = NULL;
+
return mData;
}
@@ -222,6 +372,32 @@
}
EXPORT
+media_status_t AMediaCodec_getName(
+ AMediaCodec *mData,
+ char** out_name) {
+ if (out_name == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ AString compName;
+ status_t err = mData->mCodec->getName(&compName);
+ if (err != OK) {
+ return translate_error(err);
+ }
+ *out_name = strdup(compName.c_str());
+ return AMEDIA_OK;
+}
+
+EXPORT
+void AMediaCodec_releaseName(
+ AMediaCodec * /* mData */,
+ char* name) {
+ if (name != NULL) {
+ free(name);
+ }
+}
+
+EXPORT
media_status_t AMediaCodec_configure(
AMediaCodec *mData,
const AMediaFormat* format,
@@ -236,8 +412,40 @@
surface = (Surface*) window;
}
- return translate_error(mData->mCodec->configure(nativeFormat, surface,
- crypto ? crypto->mCrypto : NULL, flags));
+ status_t err = mData->mCodec->configure(nativeFormat, surface,
+ crypto ? crypto->mCrypto : NULL, flags);
+ if (err != OK) {
+ ALOGE("configure: err(%d), failed with format: %s",
+ err, nativeFormat->debugString(0).c_str());
+ }
+ return translate_error(err);
+}
+
+EXPORT
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+ AMediaCodec *mData,
+ AMediaCodecOnAsyncNotifyCallback callback,
+ void *userdata) {
+ if (mData->mAsyncNotify == NULL && userdata != NULL) {
+ mData->mAsyncNotify = new AMessage(kWhatAsyncNotify, mData->mHandler);
+ status_t err = mData->mCodec->setCallback(mData->mAsyncNotify);
+ if (err != OK) {
+ ALOGE("setAsyncNotifyCallback: err(%d), failed to set async callback", err);
+ return translate_error(err);
+ }
+ }
+
+ Mutex::Autolock _l(mData->mAsyncCallbackLock);
+ mData->mAsyncCallback = callback;
+ mData->mAsyncCallbackUserData = userdata;
+
+ return AMEDIA_OK;
+}
+
+
+EXPORT
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec *mData) {
+ return translate_error(mData->mCodec->releaseCrypto());
}
EXPORT
@@ -282,6 +490,19 @@
EXPORT
uint8_t* AMediaCodec_getInputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+ if (mData->mAsyncNotify != NULL) {
+ // Asynchronous mode
+ sp<MediaCodecBuffer> abuf;
+ if (mData->mCodec->getInputBuffer(idx, &abuf) != 0) {
+ return NULL;
+ }
+
+ if (out_size != NULL) {
+ *out_size = abuf->capacity();
+ }
+ return abuf->data();
+ }
+
android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getInputBuffers(&abufs) == 0) {
size_t n = abufs.size();
@@ -304,6 +525,19 @@
EXPORT
uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+ if (mData->mAsyncNotify != NULL) {
+ // Asynchronous mode
+ sp<MediaCodecBuffer> abuf;
+ if (mData->mCodec->getOutputBuffer(idx, &abuf) != 0) {
+ return NULL;
+ }
+
+ if (out_size != NULL) {
+ *out_size = abuf->capacity();
+ }
+ return abuf->data();
+ }
+
android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getOutputBuffers(&abufs) == 0) {
size_t n = abufs.size();
@@ -367,6 +601,13 @@
}
EXPORT
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec *mData) {
+ sp<AMessage> format;
+ mData->mCodec->getInputFormat(&format);
+ return AMediaFormat_fromMsg(&format);
+}
+
+EXPORT
AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec *mData, size_t index) {
sp<AMessage> format;
mData->mCodec->getOutputFormat(index, &format);
@@ -542,6 +783,16 @@
return translate_error(err);
}
+EXPORT
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode) {
+ return (actionCode == ACTION_CODE_RECOVERABLE);
+}
+
+EXPORT
+bool AMediaCodecActionCode_isTransient(int32_t actionCode) {
+ return (actionCode == ACTION_CODE_TRANSIENT);
+}
+
EXPORT
void AMediaCodecCryptoInfo_setPattern(AMediaCodecCryptoInfo *info,
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index ee27520..a9025c0 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -125,6 +125,14 @@
ret.appendFormat("double(%f)", val);
break;
}
+ case AMessage::kTypeRect:
+ {
+ int32_t left, top, right, bottom;
+ f->findRect(name, &left, &top, &right, &bottom);
+ ret.appendFormat("Rect(%" PRId32 ", %" PRId32 ", %" PRId32 ", %" PRId32 ")",
+ left, top, right, bottom);
+ break;
+ }
case AMessage::kTypeString:
{
AString val;
@@ -165,11 +173,22 @@
}
EXPORT
+bool AMediaFormat_getDouble(AMediaFormat* format, const char *name, double *out) {
+ return format->mFormat->findDouble(name, out);
+}
+
+EXPORT
bool AMediaFormat_getSize(AMediaFormat* format, const char *name, size_t *out) {
return format->mFormat->findSize(name, out);
}
EXPORT
+bool AMediaFormat_getRect(AMediaFormat* format, const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) {
+ return format->mFormat->findRect(name, left, top, right, bottom);
+}
+
+EXPORT
bool AMediaFormat_getBuffer(AMediaFormat* format, const char *name, void** data, size_t *outsize) {
sp<ABuffer> buf;
if (format->mFormat->findBuffer(name, &buf)) {
@@ -216,6 +235,22 @@
}
EXPORT
+void AMediaFormat_setDouble(AMediaFormat* format, const char* name, double value) {
+ format->mFormat->setDouble(name, value);
+}
+
+EXPORT
+void AMediaFormat_setSize(AMediaFormat* format, const char* name, size_t value) {
+ format->mFormat->setSize(name, value);
+}
+
+EXPORT
+void AMediaFormat_setRect(AMediaFormat* format, const char *name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ format->mFormat->setRect(name, left, top, right, bottom);
+}
+
+EXPORT
void AMediaFormat_setString(AMediaFormat* format, const char* name, const char* value) {
// AMessage::setString() makes a copy of the string
format->mFormat->setString(name, value, strlen(value));
@@ -233,30 +268,61 @@
}
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR = "aac-drc-cut-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR = "aac-drc-boost-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION = "aac-drc-heavy-compression";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL = "aac-target-ref-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL = "aac-encoded-target-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT = "aac-max-output-channel_count";
EXPORT const char* AMEDIAFORMAT_KEY_AAC_PROFILE = "aac-profile";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE = "aac-sbr-mode";
+EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID = "audio-session-id";
+EXPORT const char* AMEDIAFORMAT_KEY_BITRATE_MODE = "bitrate-mode";
EXPORT const char* AMEDIAFORMAT_KEY_BIT_RATE = "bitrate";
+EXPORT const char* AMEDIAFORMAT_KEY_CAPTURE_RATE = "capture-rate";
EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT = "channel-count";
EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_MASK = "channel-mask";
EXPORT const char* AMEDIAFORMAT_KEY_COLOR_FORMAT = "color-format";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_RANGE = "color-range";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_STANDARD = "color-standard";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER = "color-transfer";
+EXPORT const char* AMEDIAFORMAT_KEY_COMPLEXITY = "complexity";
+EXPORT const char* AMEDIAFORMAT_KEY_DISPLAY_CROP = "crop";
EXPORT const char* AMEDIAFORMAT_KEY_DURATION = "durationUs";
EXPORT const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL = "flac-compression-level";
EXPORT const char* AMEDIAFORMAT_KEY_FRAME_RATE = "frame-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLS = "grid-cols";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_HEIGHT = "grid-height";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_WIDTH = "grid-width";
+EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
+EXPORT const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD = "intra-refresh-period";
EXPORT const char* AMEDIAFORMAT_KEY_IS_ADTS = "is-adts";
EXPORT const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT = "is-autoselect";
EXPORT const char* AMEDIAFORMAT_KEY_IS_DEFAULT = "is-default";
EXPORT const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE = "is-forced-subtitle";
EXPORT const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL = "i-frame-interval";
EXPORT const char* AMEDIAFORMAT_KEY_LANGUAGE = "language";
+EXPORT const char* AMEDIAFORMAT_KEY_LATENCY = "latency";
+EXPORT const char* AMEDIAFORMAT_KEY_LEVEL = "level";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_HEIGHT = "max-height";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE = "max-input-size";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_WIDTH = "max-width";
EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
+EXPORT const char* AMEDIAFORMAT_KEY_OPERATING_RATE = "operating-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_PCM_ENCODING = "pcm-encoding";
+EXPORT const char* AMEDIAFORMAT_KEY_PRIORITY = "priority";
+EXPORT const char* AMEDIAFORMAT_KEY_PROFILE = "profile";
EXPORT const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP = "push-blank-buffers-on-shutdown";
EXPORT const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER = "repeat-previous-frame-after";
+EXPORT const char* AMEDIAFORMAT_KEY_ROTATION = "rotation-degrees";
EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_RATE = "sample-rate";
-EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
+EXPORT const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT = "slice-height";
EXPORT const char* AMEDIAFORMAT_KEY_STRIDE = "stride";
+EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING = "ts-schema";
+EXPORT const char* AMEDIAFORMAT_KEY_TRACK_ID = "track-id";
+EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
} // extern "C"
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index b15de38..f4a51d0 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -53,11 +53,63 @@
typedef struct AMediaCodecCryptoInfo AMediaCodecCryptoInfo;
enum {
+ AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG = 2,
AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM = 4,
+ AMEDIACODEC_BUFFER_FLAG_PARTIAL_FRAME = 8,
+
AMEDIACODEC_CONFIGURE_FLAG_ENCODE = 1,
AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED = -3,
AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED = -2,
- AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1
+ AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1,
+};
+
+/**
+ * Called when an input buffer becomes available.
+ * The specified index is the index of the available input buffer.
+ */
+typedef void (*AMediaCodecOnAsyncInputAvailable)(
+ AMediaCodec *codec,
+ void *userdata,
+ int32_t index);
+/**
+ * Called when an output buffer becomes available.
+ * The specified index is the index of the available output buffer.
+ * The specified bufferInfo contains information regarding the available output buffer.
+ */
+typedef void (*AMediaCodecOnAsyncOutputAvailable)(
+ AMediaCodec *codec,
+ void *userdata,
+ int32_t index,
+ AMediaCodecBufferInfo *bufferInfo);
+/**
+ * Called when the output format has changed.
+ * The specified format contains the new output format.
+ */
+typedef void (*AMediaCodecOnAsyncFormatChanged)(
+ AMediaCodec *codec,
+ void *userdata,
+ AMediaFormat *format);
+/**
+ * Called when the MediaCodec encountered an error.
+ * The specified actionCode indicates the possible actions that the client can take,
+ * and it can be checked by calling AMediaCodecActionCode_isRecoverable or
+ * AMediaCodecActionCode_isTransient. If both AMediaCodecActionCode_isRecoverable()
+ * and AMediaCodecActionCode_isTransient() return false, then the codec error is fatal
+ * and the codec must be deleted.
+ * The specified detail may contain more detailed messages about this error.
+ */
+typedef void (*AMediaCodecOnAsyncError)(
+ AMediaCodec *codec,
+ void *userdata,
+ media_status_t error,
+ int32_t actionCode,
+ const char *detail);
+
+struct AMediaCodecOnAsyncNotifyCallback {
+ AMediaCodecOnAsyncInputAvailable onAsyncInputAvailable;
+ AMediaCodecOnAsyncOutputAvailable onAsyncOutputAvailable;
+ AMediaCodecOnAsyncFormatChanged onAsyncFormatChanged;
+ AMediaCodecOnAsyncError onAsyncError;
};
#if __ANDROID_API__ >= 21
@@ -289,6 +341,71 @@
#endif /* __ANDROID_API__ >= 26 */
+#if __ANDROID_API__ >= 28
+
+/**
+ * Get the component name. If the codec was created by createDecoderByType
+ * or createEncoderByType, what component is chosen is not known beforehand.
+ * Caller shall call AMediaCodec_releaseName to free the returned pointer.
+ */
+media_status_t AMediaCodec_getName(AMediaCodec*, char** out_name);
+
+/**
+ * Free the memory pointed by name which is returned by AMediaCodec_getName.
+ */
+void AMediaCodec_releaseName(AMediaCodec*, char* name);
+
+/**
+ * Set an asynchronous callback for actionable AMediaCodec events.
+ * When asynchronous callback is enabled, the client should not call
+ * AMediaCodec_getInputBuffers(), AMediaCodec_getOutputBuffers(),
+ * AMediaCodec_dequeueInputBuffer() or AMediaCodec_dequeueOutputBuffer().
+ *
+ * Also, AMediaCodec_flush() behaves differently in asynchronous mode.
+ * After calling AMediaCodec_flush(), you must call AMediaCodec_start() to
+ * "resume" receiving input buffers, even if an input surface was created.
+ *
+ * Refer to the definition of AMediaCodecOnAsyncNotifyCallback on how each
+ * callback function is called and what are specified.
+ * The specified userdata is the pointer used when those callback functions are
+ * called.
+ *
+ * All callbacks are fired on one NDK internal thread.
+ * AMediaCodec_setAsyncNotifyCallback should not be called on the callback thread.
+ * No heavy duty task should be performed on callback thread.
+ */
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+ AMediaCodec*,
+ AMediaCodecOnAsyncNotifyCallback callback,
+ void *userdata);
+
+/**
+ * Release the crypto if applicable.
+ */
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec*);
+
+/**
+ * Call this after AMediaCodec_configure() returns successfully to get the input
+ * format accepted by the codec. Do this to determine what optional configuration
+ * parameters were supported by the codec.
+ */
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec*);
+
+/**
+ * Returns true if the codec cannot proceed further, but can be recovered by stopping,
+ * configuring, and starting again.
+ */
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode);
+
+/**
+ * Returns true if the codec error is a transient issue, perhaps due to
+ * resource constraints, and that the method (or encoding/decoding) may be
+ * retried at a later time.
+ */
+bool AMediaCodecActionCode_isTransient(int32_t actionCode);
+
+#endif /* __ANDROID_API__ >= 28 */
+
typedef enum {
AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
diff --git a/media/ndk/include/media/NdkMediaError.h b/media/ndk/include/media/NdkMediaError.h
index da61b64..e48fcbe 100644
--- a/media/ndk/include/media/NdkMediaError.h
+++ b/media/ndk/include/media/NdkMediaError.h
@@ -35,6 +35,17 @@
typedef enum {
AMEDIA_OK = 0,
+ /**
+ * This indicates that the required resource was not able to be allocated.
+ */
+ AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE = 1100,
+
+ /**
+ * This indicates the resource manager reclaimed the media resource used by the codec.
+ * With this error, the codec must be released, as it has moved to terminal state.
+ */
+ AMEDIACODEC_ERROR_RECLAIMED = 1101,
+
AMEDIA_ERROR_BASE = -10000,
AMEDIA_ERROR_UNKNOWN = AMEDIA_ERROR_BASE,
AMEDIA_ERROR_MALFORMED = AMEDIA_ERROR_BASE - 1,
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 018ab76..b6489c7 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -51,6 +51,7 @@
bool AMediaFormat_getInt32(AMediaFormat*, const char *name, int32_t *out);
bool AMediaFormat_getInt64(AMediaFormat*, const char *name, int64_t *out);
bool AMediaFormat_getFloat(AMediaFormat*, const char *name, float *out);
+bool AMediaFormat_getSize(AMediaFormat*, const char *name, size_t *out);
/**
* The returned data is owned by the format and remains valid as long as the named entry
* is part of the format.
@@ -80,33 +81,75 @@
/**
* XXX should these be ints/enums that we look up in a table as needed?
*/
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT;
extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE;
+extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE;
+extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID;
+extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE;
extern const char* AMEDIAFORMAT_KEY_BIT_RATE;
+extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE;
extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT;
extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK;
extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT;
+extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE;
+extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD;
+extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER;
+extern const char* AMEDIAFORMAT_KEY_COMPLEXITY;
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP;
extern const char* AMEDIAFORMAT_KEY_DURATION;
extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL;
extern const char* AMEDIAFORMAT_KEY_FRAME_RATE;
+extern const char* AMEDIAFORMAT_KEY_GRID_COLS;
+extern const char* AMEDIAFORMAT_KEY_GRID_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_GRID_ROWS;
+extern const char* AMEDIAFORMAT_KEY_GRID_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO;
extern const char* AMEDIAFORMAT_KEY_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD;
extern const char* AMEDIAFORMAT_KEY_IS_ADTS;
extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT;
extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT;
extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE;
extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL;
extern const char* AMEDIAFORMAT_KEY_LANGUAGE;
+extern const char* AMEDIAFORMAT_KEY_LATENCY;
+extern const char* AMEDIAFORMAT_KEY_LEVEL;
extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT;
extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE;
extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH;
extern const char* AMEDIAFORMAT_KEY_MIME;
+extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE;
+extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING;
+extern const char* AMEDIAFORMAT_KEY_PRIORITY;
+extern const char* AMEDIAFORMAT_KEY_PROFILE;
extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP;
extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER;
+extern const char* AMEDIAFORMAT_KEY_ROTATION;
extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE;
-extern const char* AMEDIAFORMAT_KEY_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT;
extern const char* AMEDIAFORMAT_KEY_STRIDE;
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING;
+extern const char* AMEDIAFORMAT_KEY_TRACK_ID;
+extern const char* AMEDIAFORMAT_KEY_WIDTH;
#endif /* __ANDROID_API__ >= 21 */
+#if __ANDROID_API__ >= 28
+bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out);
+bool AMediaFormat_getRect(AMediaFormat*, const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom);
+
+void AMediaFormat_setDouble(AMediaFormat*, const char* name, double value);
+void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value);
+void AMediaFormat_setRect(AMediaFormat*, const char* name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom);
+#endif /* __ANDROID_API__ >= 28 */
+
__END_DECLS
#endif // _NDK_MEDIA_FORMAT_H
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index d7ad370..f2d97cd 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -26,30 +26,63 @@
AImage_getPlaneRowStride; # introduced=24
AImage_getTimestamp; # introduced=24
AImage_getWidth; # introduced=24
+ AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT; # var introduced=28
AMEDIAFORMAT_KEY_AAC_PROFILE; # var
+ AMEDIAFORMAT_KEY_AAC_SBR_MODE; # var introduced=28
+ AMEDIAFORMAT_KEY_AUDIO_SESSION_ID; # var introduced=28
+ AMEDIAFORMAT_KEY_BITRATE_MODE; # var introduced=28
AMEDIAFORMAT_KEY_BIT_RATE; # var
+ AMEDIAFORMAT_KEY_CAPTURE_RATE; # var introduced=28
AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var
AMEDIAFORMAT_KEY_CHANNEL_MASK; # var
AMEDIAFORMAT_KEY_COLOR_FORMAT; # var
+ AMEDIAFORMAT_KEY_COLOR_RANGE; # var introduced=28
+ AMEDIAFORMAT_KEY_COLOR_STANDARD; # var introduced=28
+ AMEDIAFORMAT_KEY_COLOR_TRANSFER; # var introduced=28
+ AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
+ AMEDIAFORMAT_KEY_DISPLAY_CROP; # var introduced=28
AMEDIAFORMAT_KEY_DURATION; # var
AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
AMEDIAFORMAT_KEY_FRAME_RATE; # var
+ AMEDIAFORMAT_KEY_GRID_COLS; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_HEIGHT; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_WIDTH; # var introduced=28
+ AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
AMEDIAFORMAT_KEY_HEIGHT; # var
+ AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
AMEDIAFORMAT_KEY_IS_ADTS; # var
AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var
AMEDIAFORMAT_KEY_IS_DEFAULT; # var
AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var
AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var
AMEDIAFORMAT_KEY_LANGUAGE; # var
+ AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
+ AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
AMEDIAFORMAT_KEY_MAX_HEIGHT; # var
AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var
AMEDIAFORMAT_KEY_MAX_WIDTH; # var
AMEDIAFORMAT_KEY_MIME; # var
+ AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
+ AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
+ AMEDIAFORMAT_KEY_PRIORITY; # var introduced=28
+ AMEDIAFORMAT_KEY_PROFILE; # var introduced=28
AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var
AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var
+ AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
AMEDIAFORMAT_KEY_SAMPLE_RATE; # var
+ AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
AMEDIAFORMAT_KEY_STRIDE; # var
+ AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
+ AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
AMEDIAFORMAT_KEY_WIDTH; # var
+ AMediaCodecActionCode_isRecoverable; # introduced=28
+ AMediaCodecActionCode_isTransient; # introduced=28
AMediaCodecCryptoInfo_delete;
AMediaCodecCryptoInfo_getClearBytes;
AMediaCodecCryptoInfo_getEncryptedBytes;
@@ -68,12 +101,16 @@
AMediaCodec_dequeueOutputBuffer;
AMediaCodec_flush;
AMediaCodec_getInputBuffer;
+ AMediaCodec_getInputFormat; # introduced=28
+ AMediaCodec_getName; # introduced=28
AMediaCodec_getOutputBuffer;
AMediaCodec_getOutputFormat;
AMediaCodec_queueInputBuffer;
AMediaCodec_queueSecureInputBuffer;
+ AMediaCodec_releaseCrypto; # introduced=28
AMediaCodec_releaseOutputBuffer;
AMediaCodec_releaseOutputBufferAtTime;
+ AMediaCodec_setAsyncNotifyCallback; # introduced=28
AMediaCodec_setOutputSurface; # introduced=24
AMediaCodec_setParameters; # introduced=26
AMediaCodec_setInputSurface; # introduced=26
@@ -127,16 +164,21 @@
AMediaExtractor_unselectTrack;
AMediaFormat_delete;
AMediaFormat_getBuffer;
+ AMediaFormat_getDouble; # introduced=28
AMediaFormat_getFloat;
AMediaFormat_getInt32;
AMediaFormat_getInt64;
+ AMediaFormat_getRect; # introduced=28
AMediaFormat_getSize;
AMediaFormat_getString;
AMediaFormat_new;
AMediaFormat_setBuffer;
+ AMediaFormat_setDouble; # introduced=28
AMediaFormat_setFloat;
AMediaFormat_setInt32;
AMediaFormat_setInt64;
+ AMediaFormat_setRect; # introduced=28
+ AMediaFormat_setSize; # introduced=28
AMediaFormat_setString;
AMediaFormat_toString;
AMediaMuxer_addTrack;
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index e0d0d7b..ef6e223 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -328,21 +328,21 @@
} else {
{ // convert input to int16_t as effect doesn't support float.
if (!auxType) {
- if (mInBuffer16.get() == nullptr) {
- ALOGW("%s: mInBuffer16 is null, bypassing", __func__);
+ if (mInConversionBuffer.get() == nullptr) {
+ ALOGW("%s: mInConversionBuffer is null, bypassing", __func__);
goto data_bypass;
}
const float * const pIn = mInBuffer->audioBuffer()->f32;
- int16_t * const pIn16 = mInBuffer16->audioBuffer()->s16;
+ int16_t * const pIn16 = mInConversionBuffer->audioBuffer()->s16;
memcpy_to_i16_from_float(
pIn16, pIn, inChannelCount * mConfig.inputCfg.buffer.frameCount);
}
if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- if (mOutBuffer16.get() == nullptr) {
- ALOGW("%s: mOutBuffer16 is null, bypassing", __func__);
+ if (mOutConversionBuffer.get() == nullptr) {
+ ALOGW("%s: mOutConversionBuffer is null, bypassing", __func__);
goto data_bypass;
}
- int16_t * const pOut16 = mOutBuffer16->audioBuffer()->s16;
+ int16_t * const pOut16 = mOutConversionBuffer->audioBuffer()->s16;
const float * const pOut = mOutBuffer->audioBuffer()->f32;
memcpy_to_i16_from_float(
pOut16,
@@ -354,7 +354,7 @@
ret = mEffectInterface->process();
{ // convert output back to float.
- const int16_t * const pOut16 = mOutBuffer16->audioBuffer()->s16;
+ const int16_t * const pOut16 = mOutConversionBuffer->audioBuffer()->s16;
float * const pOut = mOutBuffer->audioBuffer()->f32;
memcpy_to_float_from_i16(
pOut, pOut16, outChannelCount * mConfig.outputCfg.buffer.frameCount);
@@ -906,7 +906,7 @@
mEffectInterface->setInBuffer(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- // aux effects do in place conversion to float - we don't allocate mInBuffer16 for them.
+ // aux effects do in place conversion to float - we don't allocate mInConversionBuffer.
// Theoretically insert effects can also do in-place conversions (destroying
// the original buffer) when the output buffer is identical to the input buffer,
// but we don't optimize for it here.
@@ -920,17 +920,18 @@
ALOGV("%s: setInBuffer updating for inChannels:%d inFrameCount:%zu total size:%zu",
__func__, inChannels, inFrameCount, size);
- if (size > 0 && (mInBuffer16.get() == nullptr || size > mInBuffer16->getSize())) {
- mInBuffer16.clear();
- ALOGV("%s: allocating mInBuffer16 %zu", __func__, size);
- (void)EffectBufferHalInterface::allocate(size, &mInBuffer16);
+ if (size > 0 && (mInConversionBuffer.get() == nullptr
+ || size > mInConversionBuffer->getSize())) {
+ mInConversionBuffer.clear();
+ ALOGV("%s: allocating mInConversionBuffer %zu", __func__, size);
+ (void)EffectBufferHalInterface::allocate(size, &mInConversionBuffer);
}
- if (mInBuffer16.get() != nullptr) {
+ if (mInConversionBuffer.get() != nullptr) {
// FIXME: confirm buffer has enough size.
- mInBuffer16->setFrameCount(inFrameCount);
- mEffectInterface->setInBuffer(mInBuffer16);
+ mInConversionBuffer->setFrameCount(inFrameCount);
+ mEffectInterface->setInBuffer(mInConversionBuffer);
} else if (size > 0) {
- ALOGE("%s cannot create mInBuffer16", __func__);
+ ALOGE("%s cannot create mInConversionBuffer", __func__);
}
}
#endif
@@ -948,7 +949,7 @@
mEffectInterface->setOutBuffer(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- // Note: Any effect that does not accumulate does not need mOutBuffer16 and
+ // Note: Any effect that does not accumulate does not need mOutConversionBuffer and
// can do in-place conversion from int16_t to float. We don't optimize here.
if (!mSupportsFloat && mOutBuffer.get() != nullptr) {
const size_t outFrameCount = mConfig.outputCfg.buffer.frameCount;
@@ -958,16 +959,17 @@
ALOGV("%s: setOutBuffer updating for outChannels:%d outFrameCount:%zu total size:%zu",
__func__, outChannels, outFrameCount, size);
- if (size > 0 && (mOutBuffer16.get() == nullptr || size > mOutBuffer16->getSize())) {
- mOutBuffer16.clear();
- ALOGV("%s: allocating mOutBuffer16 %zu", __func__, size);
- (void)EffectBufferHalInterface::allocate(size, &mOutBuffer16);
+ if (size > 0 && (mOutConversionBuffer.get() == nullptr
+ || size > mOutConversionBuffer->getSize())) {
+ mOutConversionBuffer.clear();
+ ALOGV("%s: allocating mOutConversionBuffer %zu", __func__, size);
+ (void)EffectBufferHalInterface::allocate(size, &mOutConversionBuffer);
}
- if (mOutBuffer16.get() != nullptr) {
- mOutBuffer16->setFrameCount(outFrameCount);
- mEffectInterface->setOutBuffer(mOutBuffer16);
+ if (mOutConversionBuffer.get() != nullptr) {
+ mOutConversionBuffer->setFrameCount(outFrameCount);
+ mEffectInterface->setOutBuffer(mOutConversionBuffer);
} else if (size > 0) {
- ALOGE("%s cannot create mOutBuffer16", __func__);
+ ALOGE("%s cannot create mOutConversionBuffer", __func__);
}
}
#endif
@@ -1241,6 +1243,20 @@
return s;
}
+static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
+ std::stringstream ss;
+
+ if (buffer.get() == nullptr) {
+ return "nullptr"; // make different from below
+ } else if (buffer->externalData() != nullptr) {
+ ss << (isInput ? buffer->externalData() : buffer->audioBuffer()->raw)
+ << " -> "
+ << (isInput ? buffer->audioBuffer()->raw : buffer->externalData());
+ } else {
+ ss << buffer->audioBuffer()->raw;
+ }
+ return ss.str();
+}
void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args __unused)
{
@@ -1305,19 +1321,13 @@
result.append(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- if (!mSupportsFloat) {
- int16_t* pIn16 = mInBuffer16 != 0 ? mInBuffer16->audioBuffer()->s16 : NULL;
- int16_t* pOut16 = mOutBuffer16 != 0 ? mOutBuffer16->audioBuffer()->s16 : NULL;
- result.append("\t\t- Float and int16 buffers\n");
- result.append("\t\t\tIn_float In_int16 Out_float Out_int16\n");
- snprintf(buffer, SIZE,"\t\t\t%p %p %p %p\n",
- mConfig.inputCfg.buffer.raw,
- pIn16,
- pOut16,
- mConfig.outputCfg.buffer.raw);
- result.append(buffer);
- }
+ result.appendFormat("\t\t- HAL buffers:\n"
+ "\t\t\tIn(%s) InConversion(%s) Out(%s) OutConversion(%s)\n",
+ dumpInOutBuffer(true /* isInput */, mInBuffer).c_str(),
+ dumpInOutBuffer(true /* isInput */, mInConversionBuffer).c_str(),
+ dumpInOutBuffer(false /* isInput */, mOutBuffer).c_str(),
+ dumpInOutBuffer(false /* isInput */, mOutConversionBuffer).c_str());
#endif
snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size());
@@ -2161,19 +2171,6 @@
}
}
-static void dumpInOutBuffer(
- char *dump, size_t dumpSize, bool isInput, EffectBufferHalInterface *buffer) {
- if (buffer == nullptr) {
- snprintf(dump, dumpSize, "%p", buffer);
- } else if (buffer->externalData() != nullptr) {
- snprintf(dump, dumpSize, "%p -> %p",
- isInput ? buffer->externalData() : buffer->audioBuffer()->raw,
- isInput ? buffer->audioBuffer()->raw : buffer->externalData());
- } else {
- snprintf(dump, dumpSize, "%p", buffer->audioBuffer()->raw);
- }
-}
-
void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
@@ -2191,15 +2188,13 @@
result.append("\tCould not lock mutex:\n");
}
- char inBufferStr[64], outBufferStr[64];
- dumpInOutBuffer(inBufferStr, sizeof(inBufferStr), true, mInBuffer.get());
- dumpInOutBuffer(outBufferStr, sizeof(outBufferStr), false, mOutBuffer.get());
- snprintf(buffer, SIZE, "\t%-*s%-*s Active tracks:\n",
- (int)strlen(inBufferStr), "In buffer ",
- (int)strlen(outBufferStr), "Out buffer ");
- result.append(buffer);
- snprintf(buffer, SIZE, "\t%s %s %d\n", inBufferStr, outBufferStr, mActiveTrackCnt);
- result.append(buffer);
+ const std::string inBufferStr = dumpInOutBuffer(true /* isInput */, mInBuffer);
+ const std::string outBufferStr = dumpInOutBuffer(false /* isInput */, mOutBuffer);
+ result.appendFormat("\t%-*s%-*s Active tracks:\n",
+ (int)inBufferStr.size(), "In buffer ",
+ (int)outBufferStr.size(), "Out buffer ");
+ result.appendFormat("\t%s %s %d\n",
+ inBufferStr.c_str(), outBufferStr.c_str(), mActiveTrackCnt);
write(fd, result.string(), result.size());
for (size_t i = 0; i < numEffects; ++i) {
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 1864e0f..eea3208 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -171,8 +171,8 @@
#ifdef FLOAT_EFFECT_CHAIN
bool mSupportsFloat; // effect supports float processing
- sp<EffectBufferHalInterface> mInBuffer16; // Buffers for interacting with HAL at 16 bits
- sp<EffectBufferHalInterface> mOutBuffer16;
+ sp<EffectBufferHalInterface> mInConversionBuffer; // Buffers for HAL conversion if needed.
+ sp<EffectBufferHalInterface> mOutConversionBuffer;
#endif
};
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 737872d..46168a4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -249,6 +249,9 @@
mClientInterface->closeInput(mIoHandle);
LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
__FUNCTION__, mProfile->curOpenCount);
+ if (isActive()) {
+ mProfile->curActiveCount--;
+ }
mProfile->curOpenCount--;
mIoHandle = AUDIO_IO_HANDLE_NONE;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index be5a1c1..f6ee1c3 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -462,6 +462,9 @@
LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
__FUNCTION__, mProfile->curOpenCount);
+ if (isActive()) {
+ mProfile->curActiveCount--;
+ }
mProfile->curOpenCount--;
mIoHandle = AUDIO_IO_HANDLE_NONE;
}
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 9348ecd..caa0703 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -26,10 +26,6 @@
LOCAL_32_BIT_ONLY := true
LOCAL_INIT_RC := android.hardware.media.omx@1.0-service.rc
-ifeq ($(PRODUCT_FULL_TREBLE),true)
-LOCAL_CFLAGS += -DUSE_VNDBINDER
-endif
-
include $(BUILD_EXECUTABLE)
# service seccomp policy
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index 6f14a42..701ca6e 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -40,10 +40,8 @@
signal(SIGPIPE, SIG_IGN);
SetUpMinijail(kSystemSeccompPolicyPath, kVendorSeccompPolicyPath);
-#ifdef USE_VNDBINDER
android::ProcessState::initWithDriver("/dev/vndbinder");
android::ProcessState::self()->startThreadPool();
-#endif // USE_VNDBINDER
::android::hardware::configureRpcThreadpool(64, false);
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 51ae665..ac3202b 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -142,7 +142,31 @@
}
}
+// If a close request is pending then close the stream
+bool AAudioService::releaseStream(const sp<AAudioServiceStreamBase> &serviceStream) {
+ bool closed = false;
+ if ((serviceStream->decrementServiceReferenceCount() == 0) && serviceStream->isCloseNeeded()) {
+ // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
+ // then only one will get the pointer and do the close.
+ sp<AAudioServiceStreamBase> foundStream = mStreamTracker.removeStreamByHandle(serviceStream->getHandle());
+ if (foundStream.get() != nullptr) {
+ foundStream->close();
+ pid_t pid = foundStream->getOwnerProcessId();
+ AAudioClientTracker::getInstance().unregisterClientStream(pid, foundStream);
+ }
+ closed = true;
+ }
+ return closed;
+}
+
+aaudio_result_t AAudioService::checkForPendingClose(
+ const sp<AAudioServiceStreamBase> &serviceStream,
+ aaudio_result_t defaultResult) {
+ return releaseStream(serviceStream) ? AAUDIO_ERROR_INVALID_STATE : defaultResult;
+}
+
aaudio_result_t AAudioService::closeStream(aaudio_handle_t streamHandle) {
+ ALOGD("closeStream(0x%08X)", streamHandle);
// Check permission and ownership first.
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
@@ -150,22 +174,13 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
- ALOGD("closeStream(0x%08X)", streamHandle);
- // Remove handle from tracker so that we cannot look up the raw address any more.
- // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
- // then only one will get the pointer and do the close.
- serviceStream = mStreamTracker.removeStreamByHandle(streamHandle);
- if (serviceStream.get() != nullptr) {
- serviceStream->close();
- pid_t pid = serviceStream->getOwnerProcessId();
- AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
- return AAUDIO_OK;
- } else {
- ALOGW("closeStream(0x%0x) being handled by another thread", streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
-}
+ pid_t pid = serviceStream->getOwnerProcessId();
+ AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
+ serviceStream->setCloseNeeded(true);
+ (void) releaseStream(serviceStream);
+ return AAUDIO_OK;
+}
sp<AAudioServiceStreamBase> AAudioService::convertHandleToServiceStream(
aaudio_handle_t streamHandle) {
@@ -181,7 +196,9 @@
if (!allowed) {
ALOGE("AAudioService: calling uid %d cannot access stream 0x%08X owned by %d",
callingUserId, streamHandle, ownerUserId);
- serviceStream = nullptr;
+ serviceStream.clear();
+ } else {
+ serviceStream->incrementServiceReferenceCount();
}
}
return serviceStream;
@@ -198,7 +215,7 @@
aaudio_result_t result = serviceStream->getDescription(parcelable);
// parcelable.dump();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
@@ -208,7 +225,8 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->start();
+ aaudio_result_t result = serviceStream->start();
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
@@ -218,7 +236,7 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->pause();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::stopStream(aaudio_handle_t streamHandle) {
@@ -228,7 +246,7 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->stop();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
@@ -237,48 +255,51 @@
ALOGE("flushStream(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->flush();
+ aaudio_result_t result = serviceStream->flush();
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId,
int64_t periodNanoseconds) {
+ aaudio_result_t result = AAUDIO_OK;
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
if (serviceStream->getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
- ALOGE("registerAudioThread(), thread already registered");
- return AAUDIO_ERROR_INVALID_STATE;
- }
-
- const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
- serviceStream->setRegisteredThread(clientThreadId);
- int err = android::requestPriority(ownerPid, clientThreadId,
- DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
- if (err != 0){
- ALOGE("registerAudioThread(%d) failed, errno = %d, priority = %d",
- clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
- return AAUDIO_ERROR_INTERNAL;
+ ALOGE("AAudioService::registerAudioThread(), thread already registered");
+ result = AAUDIO_ERROR_INVALID_STATE;
} else {
- return AAUDIO_OK;
+ const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
+ serviceStream->setRegisteredThread(clientThreadId);
+ int err = android::requestPriority(ownerPid, clientThreadId,
+ DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
+ if (err != 0) {
+ ALOGE("AAudioService::registerAudioThread(%d) failed, errno = %d, priority = %d",
+ clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
+ result = AAUDIO_ERROR_INTERNAL;
+ }
}
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId) {
+ aaudio_result_t result = AAUDIO_OK;
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("unregisterAudioThread(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
if (serviceStream->getRegisteredThread() != clientThreadId) {
- ALOGE("unregisterAudioThread(), wrong thread");
- return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ ALOGE("AAudioService::unregisterAudioThread(), wrong thread");
+ result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ } else {
+ serviceStream->setRegisteredThread(0);
}
- serviceStream->setRegisteredThread(0);
- return AAUDIO_OK;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::startClient(aaudio_handle_t streamHandle,
@@ -289,7 +310,8 @@
ALOGE("startClient(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->startClient(client, clientHandle);
+ aaudio_result_t result = serviceStream->startClient(client, clientHandle);
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::stopClient(aaudio_handle_t streamHandle,
@@ -299,5 +321,6 @@
ALOGE("stopClient(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->stopClient(clientHandle);
+ aaudio_result_t result = serviceStream->stopClient(clientHandle);
+ return checkForPendingClose(serviceStream, result);
}
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index eef0824..bdd9e0b 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -94,9 +94,15 @@
aaudio::aaudio_handle_t streamHandle);
- android::AudioClient mAudioClient;
- aaudio::AAudioStreamTracker mStreamTracker;
+ bool releaseStream(const sp<aaudio::AAudioServiceStreamBase> &serviceStream);
+
+ aaudio_result_t checkForPendingClose(const sp<aaudio::AAudioServiceStreamBase> &serviceStream,
+ aaudio_result_t defaultResult);
+
+ android::AudioClient mAudioClient;
+
+ aaudio::AAudioStreamTracker mStreamTracker;
enum constants {
DEFAULT_AUDIO_PRIORITY = 2
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 635b45c..53d2860 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -402,3 +402,13 @@
void AAudioServiceStreamBase::onVolumeChanged(float volume) {
sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
}
+
+int32_t AAudioServiceStreamBase::incrementServiceReferenceCount() {
+ std::lock_guard<std::mutex> lock(mCallingCountLock);
+ return ++mCallingCount;
+}
+
+int32_t AAudioServiceStreamBase::decrementServiceReferenceCount() {
+ std::lock_guard<std::mutex> lock(mCallingCountLock);
+ return --mCallingCount;
+}
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 29987f6..5f5bb98 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -199,6 +199,26 @@
return mFlowing;
}
+ /**
+ * Atomically increment the number of active references to the stream by AAudioService.
+ * @return value after the increment
+ */
+ int32_t incrementServiceReferenceCount();
+
+ /**
+ * Atomically decrement the number of active references to the stream by AAudioService.
+ * @return value after the decrement
+ */
+ int32_t decrementServiceReferenceCount();
+
+ bool isCloseNeeded() const {
+ return mCloseNeeded.load();
+ }
+
+ void setCloseNeeded(bool needed) {
+ mCloseNeeded.store(needed);
+ }
+
protected:
/**
@@ -256,8 +276,11 @@
private:
aaudio_handle_t mHandle = -1;
-
bool mFlowing = false;
+
+ std::mutex mCallingCountLock;
+ std::atomic<int32_t> mCallingCount{0};
+ std::atomic<bool> mCloseNeeded{false};
};
} /* namespace aaudio */