Merge "Add manifests for media and media.swcodec APEXs"
diff --git a/media/extractors/mp4/AC4Parser.cpp b/media/extractors/mp4/AC4Parser.cpp
index 59a2e9b..13d60c8 100644
--- a/media/extractors/mp4/AC4Parser.cpp
+++ b/media/extractors/mp4/AC4Parser.cpp
@@ -260,7 +260,7 @@
int32_t short_program_id = -1;
if (bitstream_version > 1) {
- if (ac4_dsi_version == 0){
+ if (ac4_dsi_version == 0) {
ALOGE("invalid ac4 dsi");
return false;
}
@@ -295,6 +295,7 @@
bool b_single_substream_group = false;
uint32_t presentation_config = 0, presentation_version = 0;
uint32_t pres_bytes = 0;
+ uint64_t start = 0;
if (ac4_dsi_version == 0) {
CHECK_BITS_LEFT(1 + 5 + 5);
@@ -315,6 +316,8 @@
mBitReader.skipBits(pres_bytes * 8);
continue;
}
+ /* record a marker, less the size of the presentation_config */
+ start = (mDSISize - mBitReader.numBitsLeft()) / 8;
// ac4_presentation_v0_dsi(), ac4_presentation_v1_dsi() and ac4_presentation_v2_dsi()
// all start with a presentation_config of 5 bits
CHECK_BITS_LEFT(5);
@@ -338,9 +341,6 @@
(presentation_config >= NELEM(PresentationConfig) ?
"reserved" : PresentationConfig[presentation_config]));
- /* record a marker, less the size of the presentation_config */
- uint64_t start = (mDSISize - mBitReader.numBitsLeft()) / 8;
-
bool b_add_emdf_substreams = false;
if (!b_single_substream_group && presentation_config == 6) {
b_add_emdf_substreams = true;
@@ -535,14 +535,14 @@
}
break;
}
- CHECK_BITS_LEFT(1 + 1);
- bool b_pre_virtualized = (mBitReader.getBits(1) == 1);
- mPresentations[presentation].mPreVirtualized = b_pre_virtualized;
- b_add_emdf_substreams = (mBitReader.getBits(1) == 1);
- ALOGV("%u: b_pre_virtualized = %s\n", presentation, BOOLSTR(b_pre_virtualized));
- ALOGV("%u: b_add_emdf_substreams = %s\n", presentation,
- BOOLSTR(b_add_emdf_substreams));
}
+ CHECK_BITS_LEFT(1 + 1);
+ bool b_pre_virtualized = (mBitReader.getBits(1) == 1);
+ mPresentations[presentation].mPreVirtualized = b_pre_virtualized;
+ b_add_emdf_substreams = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_pre_virtualized = %s\n", presentation, BOOLSTR(b_pre_virtualized));
+ ALOGV("%u: b_add_emdf_substreams = %s\n", presentation,
+ BOOLSTR(b_add_emdf_substreams));
}
if (b_add_emdf_substreams) {
CHECK_BITS_LEFT(7);
@@ -599,10 +599,6 @@
if (ac4_dsi_version == 1) {
uint64_t end = (mDSISize - mBitReader.numBitsLeft()) / 8;
- if (mBitReader.numBitsLeft() % 8 != 0) {
- end += 1;
- }
-
uint64_t presentation_bytes = end - start;
uint64_t skip_bytes = pres_bytes - presentation_bytes;
ALOGV("skipping = %" PRIu64 " bytes", skip_bytes);
@@ -612,7 +608,7 @@
// we should know this or something is probably wrong
// with the bitstream (or we don't support it)
- if (mPresentations[presentation].mChannelMode == -1){
+ if (mPresentations[presentation].mChannelMode == -1) {
ALOGE("could not determing channel mode of presentation %d", presentation);
return false;
}
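
The byte markers above drive how many presentation bytes get skipped: start is now recorded before presentation_config is parsed, end is taken once the presentation has been consumed, and pres_bytes minus (end - start) is skipped. A minimal sketch of that arithmetic, using a stand-in bit reader rather than the AOSP ABitReader; all sizes below are made-up example values:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for a bit reader that knows its total size and remaining bits.
    struct FakeBitReader {
        uint64_t totalBits;
        uint64_t bitsLeft;
        uint64_t numBitsLeft() const { return bitsLeft; }
    };

    int main() {
        const uint64_t dsiSizeBits = 400;          // total DSI size in bits (example)
        FakeBitReader br{dsiSizeBits, dsiSizeBits};
        const uint64_t pres_bytes = 20;            // declared size of this presentation (example)

        br.bitsLeft -= 8;                          // bits consumed before this presentation
        // Marker taken *before* reading presentation_config, as in the patched code.
        const uint64_t start = (dsiSizeBits - br.numBitsLeft()) / 8;

        br.bitsLeft -= 5 + 32 + 27;                // bits consumed while parsing the presentation
        const uint64_t end = (dsiSizeBits - br.numBitsLeft()) / 8;

        const uint64_t presentation_bytes = end - start;
        const uint64_t skip_bytes = pres_bytes - presentation_bytes;
        printf("parsed %llu bytes, skipping %llu bytes\n",
               (unsigned long long)presentation_bytes, (unsigned long long)skip_bytes);
        return 0;
    }
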
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 9daa299..8df1921 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -70,70 +70,34 @@
// ---------------------------------------------------------------------------
-static std::string audioFormatTypeString(audio_format_t value) {
- std::string formatType;
- if (FormatConverter::toString(value, formatType)) {
- return formatType;
- }
- char rawbuffer[16]; // room for "%d"
- snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
- return rawbuffer;
-}
-
-static std::string audioSourceString(audio_source_t value) {
- std::string source;
- if (SourceTypeConverter::toString(value, source)) {
- return source;
- }
- char rawbuffer[16]; // room for "%d"
- snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
- return rawbuffer;
-}
-
void AudioRecord::MediaMetrics::gather(const AudioRecord *record)
{
- // key for media statistics is defined in the header
- // attrs for media statistics
- // NB: these are matched with public Java API constants defined
- // in frameworks/base/media/java/android/media/AudioRecord.java
- // These must be kept synchronized with the constants there.
- static constexpr char kAudioRecordEncoding[] = "android.media.audiorecord.encoding";
- static constexpr char kAudioRecordSource[] = "android.media.audiorecord.source";
- static constexpr char kAudioRecordLatency[] = "android.media.audiorecord.latency";
- static constexpr char kAudioRecordSampleRate[] = "android.media.audiorecord.samplerate";
- static constexpr char kAudioRecordChannelCount[] = "android.media.audiorecord.channels";
- static constexpr char kAudioRecordCreated[] = "android.media.audiorecord.createdMs";
- static constexpr char kAudioRecordDuration[] = "android.media.audiorecord.durationMs";
- static constexpr char kAudioRecordCount[] = "android.media.audiorecord.n";
- static constexpr char kAudioRecordError[] = "android.media.audiorecord.errcode";
- static constexpr char kAudioRecordErrorFunction[] = "android.media.audiorecord.errfunc";
+#define MM_PREFIX "android.media.audiorecord." // avoid cut-n-paste errors.
- // constructor guarantees mAnalyticsItem is valid
+ // Java API 28 entries, do not change.
+ mAnalyticsItem->setCString(MM_PREFIX "encoding", toString(record->mFormat).c_str());
+ mAnalyticsItem->setCString(MM_PREFIX "source", toString(record->mAttributes.source).c_str());
+ mAnalyticsItem->setInt32(MM_PREFIX "latency", (int32_t)record->mLatency); // bad estimate.
+ mAnalyticsItem->setInt32(MM_PREFIX "samplerate", (int32_t)record->mSampleRate);
+ mAnalyticsItem->setInt32(MM_PREFIX "channels", (int32_t)record->mChannelCount);
- mAnalyticsItem->setInt32(kAudioRecordLatency, record->mLatency);
- mAnalyticsItem->setInt32(kAudioRecordSampleRate, record->mSampleRate);
- mAnalyticsItem->setInt32(kAudioRecordChannelCount, record->mChannelCount);
- mAnalyticsItem->setCString(kAudioRecordEncoding,
- audioFormatTypeString(record->mFormat).c_str());
- mAnalyticsItem->setCString(kAudioRecordSource,
- audioSourceString(record->mAttributes.source).c_str());
+ // Non-API entries, these can change.
+ mAnalyticsItem->setInt32(MM_PREFIX "portId", (int32_t)record->mPortId);
+ mAnalyticsItem->setInt32(MM_PREFIX "frameCount", (int32_t)record->mFrameCount);
+ mAnalyticsItem->setCString(MM_PREFIX "attributes", toString(record->mAttributes).c_str());
+ mAnalyticsItem->setInt64(MM_PREFIX "channelMask", (int64_t)record->mChannelMask);
- // log total duration recording, including anything currently running [and count].
- nsecs_t active = 0;
+ // log total duration recording, including anything currently running.
+ int64_t activeNs = 0;
if (mStartedNs != 0) {
- active = systemTime() - mStartedNs;
+ activeNs = systemTime() - mStartedNs;
}
- mAnalyticsItem->setInt64(kAudioRecordDuration, (mDurationNs + active) / (1000 * 1000));
- mAnalyticsItem->setInt32(kAudioRecordCount, mCount);
-
- // XXX I don't know that this adds a lot of value, long term
- if (mCreatedNs != 0) {
- mAnalyticsItem->setInt64(kAudioRecordCreated, mCreatedNs / (1000 * 1000));
- }
+ mAnalyticsItem->setDouble(MM_PREFIX "durationMs", (mDurationNs + activeNs) * 1e-6);
+ mAnalyticsItem->setInt64(MM_PREFIX "startCount", (int64_t)mCount);
if (mLastError != NO_ERROR) {
- mAnalyticsItem->setInt32(kAudioRecordError, mLastError);
- mAnalyticsItem->setCString(kAudioRecordErrorFunction, mLastErrorFunc.c_str());
+ mAnalyticsItem->setInt32(MM_PREFIX "lastError.code", (int32_t)mLastError);
+ mAnalyticsItem->setCString(MM_PREFIX "lastError.at", mLastErrorFunc.c_str());
}
}
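
The MM_PREFIX keys above rely on C/C++ adjacent string-literal concatenation: MM_PREFIX "encoding" is a single compile-time literal, identical to the hand-written kAudioRecordEncoding constant it replaces. A standalone sketch of the pattern (only the standard library; the key name mirrors the patch):

    #include <cstdio>
    #include <cstring>

    #define MM_PREFIX "android.media.audiorecord."   // same prefix as in the patch

    int main() {
        const char* key = MM_PREFIX "encoding";       // concatenated into one literal
        printf("%s\n", key);                          // android.media.audiorecord.encoding
        // The compile-time result is identical to the hand-written constant it replaces.
        return strcmp(key, "android.media.audiorecord.encoding") == 0 ? 0 : 1;
    }
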
@@ -349,7 +313,7 @@
mCbf = cbf;
if (cbf != NULL) {
- mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
+ mAudioRecordThread = new AudioRecordThread(*this);
mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
// thread begins in paused state, and will not reference us until start()
}
@@ -1426,8 +1390,7 @@
// =========================================================================
-AudioRecord::AudioRecordThread::AudioRecordThread(AudioRecord& receiver,
- bool bCanCallJava __unused)
+AudioRecord::AudioRecordThread::AudioRecordThread(AudioRecord& receiver)
: Thread(true /* bCanCallJava */) // binder recursion on restoreRecord_l() may call Java.
, mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
mIgnoreNextPausedInt(false)
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 670a200..e59f7e0 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -170,44 +170,8 @@
// ---------------------------------------------------------------------------
-static std::string audioContentTypeString(audio_content_type_t value) {
- std::string contentType;
- if (AudioContentTypeConverter::toString(value, contentType)) {
- return contentType;
- }
- char rawbuffer[16]; // room for "%d"
- snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
- return rawbuffer;
-}
-
-static std::string audioUsageString(audio_usage_t value) {
- std::string usage;
- if (UsageTypeConverter::toString(value, usage)) {
- return usage;
- }
- char rawbuffer[16]; // room for "%d"
- snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
- return rawbuffer;
-}
-
void AudioTrack::MediaMetrics::gather(const AudioTrack *track)
{
-
- // key for media statistics is defined in the header
- // attrs for media statistics
- // NB: these are matched with public Java API constants defined
- // in frameworks/base/media/java/android/media/AudioTrack.java
- // These must be kept synchronized with the constants there.
- static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
- static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
- static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
- static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
- static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
-
- // NB: These are not yet exposed as public Java API constants.
- static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
- static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";
-
// only if we're in a good state...
// XXX: shall we gather alternative info if failing?
const status_t lstatus = track->initCheck();
@@ -216,28 +180,22 @@
return;
}
- // constructor guarantees mAnalyticsItem is valid
+#define MM_PREFIX "android.media.audiotrack." // avoid cut-n-paste errors.
- const int32_t underrunFrames = track->getUnderrunFrames();
- if (underrunFrames != 0) {
- mAnalyticsItem->setInt32(kAudioTrackUnderrunFrames, underrunFrames);
- }
+ // Java API 28 entries, do not change.
+ mAnalyticsItem->setCString(MM_PREFIX "streamtype", toString(track->streamType()).c_str());
+ mAnalyticsItem->setCString(MM_PREFIX "type",
+ toString(track->mAttributes.content_type).c_str());
+ mAnalyticsItem->setCString(MM_PREFIX "usage", toString(track->mAttributes.usage).c_str());
- if (track->mTimestampStartupGlitchReported) {
- mAnalyticsItem->setInt32(kAudioTrackStartupGlitch, 1);
- }
-
- if (track->mStreamType != -1) {
- // deprecated, but this will tell us who still uses it.
- mAnalyticsItem->setInt32(kAudioTrackStreamType, track->mStreamType);
- }
- // XXX: consider including from mAttributes: source type
- mAnalyticsItem->setCString(kAudioTrackContentType,
- audioContentTypeString(track->mAttributes.content_type).c_str());
- mAnalyticsItem->setCString(kAudioTrackUsage,
- audioUsageString(track->mAttributes.usage).c_str());
- mAnalyticsItem->setInt32(kAudioTrackSampleRate, track->mSampleRate);
- mAnalyticsItem->setInt64(kAudioTrackChannelMask, track->mChannelMask);
+ // Non-API entries, these can change due to a Java string mistake.
+ mAnalyticsItem->setInt32(MM_PREFIX "sampleRate", (int32_t)track->mSampleRate);
+ mAnalyticsItem->setInt64(MM_PREFIX "channelMask", (int64_t)track->mChannelMask);
+ // Non-API entries, these can change.
+ mAnalyticsItem->setInt32(MM_PREFIX "portId", (int32_t)track->mPortId);
+ mAnalyticsItem->setCString(MM_PREFIX "encoding", toString(track->mFormat).c_str());
+ mAnalyticsItem->setInt32(MM_PREFIX "frameCount", (int32_t)track->mFrameCount);
+ mAnalyticsItem->setCString(MM_PREFIX "attributes", toString(track->mAttributes).c_str());
}
// hand the user a snapshot of the metrics.
@@ -615,7 +573,7 @@
mCbf = cbf;
if (cbf != NULL) {
- mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
+ mAudioTrackThread = new AudioTrackThread(*this);
mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
// thread begins in paused state, and will not reference us until start()
}
@@ -3127,7 +3085,7 @@
// =========================================================================
-AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava __unused)
+AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver)
: Thread(true /* bCanCallJava */) // binder recursion on restoreTrack_l() may call Java.
, mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
mIgnoreNextPausedInt(false)
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
index 41b425f..783eef3 100644
--- a/media/libaudioclient/include/media/AudioMixer.h
+++ b/media/libaudioclient/include/media/AudioMixer.h
@@ -273,7 +273,7 @@
mPostDownmixReformatBufferProvider.reset(nullptr);
mDownmixerBufferProvider.reset(nullptr);
mReformatBufferProvider.reset(nullptr);
- mAdjustChannelsNonDestructiveBufferProvider.reset(nullptr);
+ mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
mAdjustChannelsBufferProvider.reset(nullptr);
}
@@ -347,8 +347,12 @@
* all pre-mixer track buffer conversions outside the AudioMixer class.
*
* 1) mInputBufferProvider: The AudioTrack buffer provider.
- * 2) mAdjustChannelsBufferProvider: Expend or contracts data
- * 3) mAdjustChannelsNonDestructiveBufferProvider: Non-destructively adjust sample data
+ * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
+ * channel format to another. Expanded channels are filled with zeros and put at the end
+ * of each audio frame. Contracted channels are copied to the end of the buffer.
+ * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contracts sample data.
+ *    This is currently used by audio-haptic coupled playback to separate audio and haptic
+ *    data. Contracted channels may be written to a given buffer.
* 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
* match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
* requires reformat. For example, it may convert floating point input to
@@ -360,9 +364,10 @@
* 7) mTimestretchBufferProvider: Adds timestretching for playback rate
*/
AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
- // TODO: combine AdjustChannelsBufferProvider and AdjustChannelsNonDestructiveBufferProvider
+ // TODO: combine mAdjustChannelsBufferProvider and
+ // mContractChannelsNonDestructiveBufferProvider
std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
- std::unique_ptr<PassthruBufferProvider> mAdjustChannelsNonDestructiveBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 4707c4a..a9f8711 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -562,7 +562,7 @@
class AudioRecordThread : public Thread
{
public:
- AudioRecordThread(AudioRecord& receiver, bool bCanCallJava = false);
+ AudioRecordThread(AudioRecord& receiver);
// Do not call Thread::requestExitAndWait() without first calling requestExit().
// Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 12f5d71..3926ead 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -934,7 +934,7 @@
class AudioTrackThread : public Thread
{
public:
- AudioTrackThread(AudioTrack& receiver, bool bCanCallJava = false);
+ AudioTrackThread(AudioTrack& receiver);
// Do not call Thread::requestExitAndWait() without first calling requestExit().
// Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index 2c57db7..f7cc096 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -408,8 +408,8 @@
void AudioMixer::Track::unprepareForAdjustChannelsNonDestructive()
{
ALOGV("AUDIOMIXER::unprepareForAdjustChannelsNonDestructive");
- if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
- mAdjustChannelsNonDestructiveBufferProvider.reset(nullptr);
+ if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
reconfigureBufferProviders();
}
}
@@ -426,13 +426,13 @@
? (uint8_t*)mainBuffer + frames * audio_bytes_per_frame(
mMixerChannelCount, mMixerFormat)
: NULL;
- mAdjustChannelsNonDestructiveBufferProvider.reset(
- new AdjustChannelsNonDestructiveBufferProvider(
+ mContractChannelsNonDestructiveBufferProvider.reset(
+ new AdjustChannelsBufferProvider(
mFormat,
mAdjustNonDestructiveInChannelCount,
mAdjustNonDestructiveOutChannelCount,
- mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
frames,
+ mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
buffer));
reconfigureBufferProviders();
}
@@ -441,9 +441,9 @@
void AudioMixer::Track::clearContractedBuffer()
{
- if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
- static_cast<AdjustChannelsNonDestructiveBufferProvider*>(
- mAdjustChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
+ if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ static_cast<AdjustChannelsBufferProvider*>(
+ mContractChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
}
}
@@ -455,9 +455,9 @@
mAdjustChannelsBufferProvider->setBufferProvider(bufferProvider);
bufferProvider = mAdjustChannelsBufferProvider.get();
}
- if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
- mAdjustChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = mAdjustChannelsNonDestructiveBufferProvider.get();
+ if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ mContractChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mContractChannelsNonDestructiveBufferProvider.get();
}
if (mReformatBufferProvider.get() != nullptr) {
mReformatBufferProvider->setBufferProvider(bufferProvider);
@@ -966,8 +966,8 @@
track->mDownmixerBufferProvider->reset();
} else if (track->mReformatBufferProvider.get() != nullptr) {
track->mReformatBufferProvider->reset();
- } else if (track->mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
- track->mAdjustChannelsNonDestructiveBufferProvider->reset();
+ } else if (track->mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ track->mContractChannelsNonDestructiveBufferProvider->reset();
} else if (track->mAdjustChannelsBufferProvider.get() != nullptr) {
track->mAdjustChannelsBufferProvider->reset();
}
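
reconfigureBufferProviders() stacks each optional provider, when present, on top of the provider selected so far, so the renamed mContractChannelsNonDestructiveBufferProvider keeps the same slot in the chain. A simplified sketch of that chaining pattern; the Provider and Stage types below are illustrative stand-ins, not the AOSP AudioBufferProvider/PassthruBufferProvider interfaces:

    #include <cstdio>
    #include <initializer_list>
    #include <memory>

    struct Provider {
        virtual ~Provider() = default;
        virtual const char* describe() const = 0;
    };

    struct Stage : Provider {
        explicit Stage(const char* name) : mName(name) {}
        void setBufferProvider(Provider* upstream) { mUpstream = upstream; }
        const char* describe() const override { return mName; }
        const char* mName;
        Provider* mUpstream = nullptr;   // not owned, mirrors the raw chaining in AudioMixer
    };

    int main() {
        Stage input("input");
        std::unique_ptr<Stage> adjustChannels;                       // optional, may stay null
        std::unique_ptr<Stage> contractNonDestructive(new Stage("contract"));
        std::unique_ptr<Stage> reformat(new Stage("reformat"));

        Provider* chainEnd = &input;
        // Same pattern as the patch: wire a stage only if it exists, then advance the chain end.
        for (Stage* s : { adjustChannels.get(), contractNonDestructive.get(), reformat.get() }) {
            if (s != nullptr) {
                s->setBufferProvider(chainEnd);
                chainEnd = s;
            }
        }
        printf("chain ends at: %s\n", chainEnd->describe());         // "reformat"
        return 0;
    }
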
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index b764ccb..21d25e1 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -627,79 +627,68 @@
}
}
-AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(audio_format_t format,
- size_t inChannelCount, size_t outChannelCount, size_t frameCount) :
+AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(
+ audio_format_t format, size_t inChannelCount, size_t outChannelCount,
+ size_t frameCount, audio_format_t contractedFormat, void* contractedBuffer) :
CopyBufferProvider(
audio_bytes_per_frame(inChannelCount, format),
- audio_bytes_per_frame(outChannelCount, format),
+ audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
frameCount),
mFormat(format),
mInChannelCount(inChannelCount),
mOutChannelCount(outChannelCount),
- mSampleSizeInBytes(audio_bytes_per_sample(format))
-{
- ALOGV("AdjustBufferProvider(%p)(%#x, %zu, %zu, %zu)",
- this, format, inChannelCount, outChannelCount, frameCount);
-}
-
-void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
- adjust_channels(src, mInChannelCount, dst, mOutChannelCount, mSampleSizeInBytes,
- frames * mInChannelCount * mSampleSizeInBytes);
-}
-
-AdjustChannelsNonDestructiveBufferProvider::AdjustChannelsNonDestructiveBufferProvider(
- audio_format_t format, size_t inChannelCount, size_t outChannelCount,
- audio_format_t contractedFormat, size_t contractedFrameCount, void* contractedBuffer) :
- CopyBufferProvider(
- audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
- audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
- contractedFrameCount),
- mFormat(format),
- mInChannelCount(inChannelCount),
- mOutChannelCount(outChannelCount),
mSampleSizeInBytes(audio_bytes_per_sample(format)),
+ mFrameCount(frameCount),
mContractedChannelCount(inChannelCount - outChannelCount),
mContractedFormat(contractedFormat),
- mContractedFrameCount(contractedFrameCount),
mContractedBuffer(contractedBuffer),
mContractedWrittenFrames(0)
{
- ALOGV("AdjustChannelsNonDestructiveBufferProvider(%p)(%#x, %zu, %zu, %#x, %p)",
- this, format, inChannelCount, outChannelCount, contractedFormat, contractedBuffer);
+ ALOGV("AdjustChannelsBufferProvider(%p)(%#x, %zu, %zu, %zu, %#x, %p)", this, format,
+ inChannelCount, outChannelCount, frameCount, contractedFormat, contractedBuffer);
if (mContractedFormat != AUDIO_FORMAT_INVALID && mInChannelCount > mOutChannelCount) {
mContractedFrameSize = audio_bytes_per_frame(mContractedChannelCount, mContractedFormat);
}
}
-status_t AdjustChannelsNonDestructiveBufferProvider::getNextBuffer(
- AudioBufferProvider::Buffer* pBuffer)
+status_t AdjustChannelsBufferProvider::getNextBuffer(AudioBufferProvider::Buffer* pBuffer)
{
- const size_t outFramesLeft = mContractedFrameCount - mContractedWrittenFrames;
- if (outFramesLeft < pBuffer->frameCount) {
- // Restrict the frame count so that we don't write over the size of the output buffer.
- pBuffer->frameCount = outFramesLeft;
+ if (mContractedBuffer != nullptr) {
+ // Restrict frame count only when it is needed to save contracted frames.
+ const size_t outFramesLeft = mFrameCount - mContractedWrittenFrames;
+ if (outFramesLeft < pBuffer->frameCount) {
+ // Restrict the frame count so that we don't write over the size of the output buffer.
+ pBuffer->frameCount = outFramesLeft;
+ }
}
return CopyBufferProvider::getNextBuffer(pBuffer);
}
-void AdjustChannelsNonDestructiveBufferProvider::copyFrames(
- void *dst, const void *src, size_t frames)
+void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
{
- adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount, mSampleSizeInBytes,
- frames * mInChannelCount * mSampleSizeInBytes);
- if (mContractedFormat != AUDIO_FORMAT_INVALID && mContractedBuffer != NULL
- && mInChannelCount > mOutChannelCount) {
- const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
- memcpy_by_audio_format(
- (uint8_t*)mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
- mContractedFormat, (uint8_t*)dst + contractedIdx, mFormat,
- mContractedChannelCount * frames);
- mContractedWrittenFrames += frames;
+ if (mInChannelCount > mOutChannelCount) {
+ // In the multi-to-mono case, adjust_channels has special logic that mixes the first two
+ // input channels into a single output channel. To avoid that, use
+ // adjust_channels_non_destructive, which keeps only one channel's data even when
+ // contracting to mono.
+ adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount,
+ mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
+ if (mContractedFormat != AUDIO_FORMAT_INVALID
+ && mContractedBuffer != nullptr) {
+ const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+ memcpy_by_audio_format(
+ (uint8_t*) mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
+ mContractedFormat, (uint8_t*) dst + contractedIdx, mFormat,
+ mContractedChannelCount * frames);
+ mContractedWrittenFrames += frames;
+ }
+ } else {
+ // When expanding, extra zero-filled channels are appended at the end of each audio frame.
+ adjust_channels(src, mInChannelCount, dst, mOutChannelCount,
+ mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
}
}
-void AdjustChannelsNonDestructiveBufferProvider::reset()
+void AdjustChannelsBufferProvider::reset()
{
mContractedWrittenFrames = 0;
CopyBufferProvider::reset();
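
In effect, the merged AdjustChannelsBufferProvider contracts by keeping the first outChannelCount channels of each frame, packing the dropped channels after the kept samples in the destination, and optionally copying them (e.g. haptic data) into the contracted buffer. A self-contained sketch of that idea with plain int16 samples; this is not the adjust_channels_non_destructive / memcpy_by_audio_format implementation:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Keep the first outCh channels per frame in place, pack the dropped channels after
    // the kept samples, and optionally copy them into a separate "contracted" buffer.
    static void contractNonDestructive(const int16_t* src, size_t inCh, size_t outCh,
                                       size_t frames, int16_t* dst, int16_t* contracted) {
        const size_t droppedCh = inCh - outCh;
        size_t packIdx = frames * outCh;            // dropped samples go after the kept ones
        for (size_t f = 0; f < frames; ++f) {
            for (size_t c = 0; c < outCh; ++c) {
                dst[f * outCh + c] = src[f * inCh + c];
            }
            for (size_t c = 0; c < droppedCh; ++c) {
                dst[packIdx] = src[f * inCh + outCh + c];
                if (contracted != nullptr) {
                    contracted[f * droppedCh + c] = src[f * inCh + outCh + c];
                }
                ++packIdx;
            }
        }
    }

    int main() {
        // 2 frames of 3 interleaved channels: L, R, haptic.
        const std::vector<int16_t> src = { 10, 11, 91,   20, 21, 92 };
        std::vector<int16_t> dst(src.size());
        std::vector<int16_t> haptic(2);             // 1 contracted channel x 2 frames
        contractNonDestructive(src.data(), 3, 2, 2, dst.data(), haptic.data());
        // dst: 10 11 20 21 91 92   haptic: 91 92
        for (int16_t s : dst) printf("%d ", s);
        printf("| haptic: %d %d\n", haptic[0], haptic[1]);
        return 0;
    }
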
diff --git a/media/libmedia/include/media/BufferProviders.h b/media/libmedia/include/media/BufferProviders.h
index ea41527..b038854 100644
--- a/media/libmedia/include/media/BufferProviders.h
+++ b/media/libmedia/include/media/BufferProviders.h
@@ -218,33 +218,21 @@
bool mAudioPlaybackRateValid; // flag for current parameters validity
};
-// AdjustBufferProvider derives from CopyBufferProvider to adjust sample data.
+// AdjustChannelsBufferProvider derives from CopyBufferProvider to adjust sample data.
// Expands or contracts sample data from one interleaved channel format to another.
-// Expanded channels are filled with zeros and put at the end of each audio frame.
-// Contracted channels are omitted from the end of each audio frame.
+// Extra expanded channels are filled with zeros and put at the end of each audio frame.
+// Contracted channels are copied to the end of the output buffer (storage should be
+// allocated appropriately).
+// Contracted channels may also be converted and written to a separate contracted buffer.
class AdjustChannelsBufferProvider : public CopyBufferProvider {
public:
AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
- size_t outChannelCount, size_t frameCount);
- //Overrides
- void copyFrames(void *dst, const void *src, size_t frames) override;
-
-protected:
- const audio_format_t mFormat;
- const size_t mInChannelCount;
- const size_t mOutChannelCount;
- const size_t mSampleSizeInBytes;
-};
-
-// AdjustChannelsNonDestructiveBufferProvider derives from CopyBufferProvider to adjust sample data.
-// Expands or contracts sample data from one interleaved channel format to another.
-// Extra expanded channels are interleaved in from the end of the input buffer.
-// Contracted channels are copied to the end of the output buffer.
-// Contracted channels could be written to output buffer.
-class AdjustChannelsNonDestructiveBufferProvider : public CopyBufferProvider {
-public:
- AdjustChannelsNonDestructiveBufferProvider(audio_format_t format, size_t inChannelCount,
- size_t outChannelCount, audio_format_t contractedFormat, size_t contractedFrameCount,
+ size_t outChannelCount, size_t frameCount) : AdjustChannelsBufferProvider(
+ format, inChannelCount, outChannelCount,
+ frameCount, AUDIO_FORMAT_INVALID, nullptr) { }
+ // Contracted data is converted to contractedFormat and put into contractedBuffer.
+ AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
+ size_t outChannelCount, size_t frameCount, audio_format_t contractedFormat,
void* contractedBuffer);
//Overrides
status_t getNextBuffer(Buffer* pBuffer) override;
@@ -258,9 +246,9 @@
const size_t mInChannelCount;
const size_t mOutChannelCount;
const size_t mSampleSizeInBytes;
+ const size_t mFrameCount;
const size_t mContractedChannelCount;
const audio_format_t mContractedFormat;
- const size_t mContractedFrameCount;
void *mContractedBuffer;
size_t mContractedWrittenFrames;
size_t mContractedFrameSize;
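
The header keeps the old four-argument constructor by delegating to the new full constructor with AUDIO_FORMAT_INVALID and nullptr, so existing expansion/contraction call sites compile unchanged. A tiny sketch of the same C++11 delegating-constructor pattern, with a made-up Widget type standing in for AdjustChannelsBufferProvider:

    #include <cstdio>

    class Widget {
    public:
        // Old, shorter signature: forwards to the full constructor with defaults,
        // so existing call sites keep compiling.
        Widget(int inCh, int outCh, int frames)
                : Widget(inCh, outCh, frames, /*contractedFormat=*/-1, /*contractedBuffer=*/nullptr) {}

        Widget(int inCh, int outCh, int frames, int contractedFormat, void* contractedBuffer)
                : mInCh(inCh), mOutCh(outCh), mFrames(frames),
                  mContractedFormat(contractedFormat), mContractedBuffer(contractedBuffer) {}

        void dump() const {
            printf("in=%d out=%d frames=%d contractedFormat=%d buffer=%p\n",
                   mInCh, mOutCh, mFrames, mContractedFormat, mContractedBuffer);
        }

    private:
        int mInCh, mOutCh, mFrames, mContractedFormat;
        void* mContractedBuffer;
    };

    int main() {
        Widget w(2, 1, 480);   // uses the delegating constructor
        w.dump();
        return 0;
    }
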
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 91b7587..40980a6 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -32,6 +32,7 @@
libbinder \
libaudioclient \
libmedialogservice \
+ libmediametrics \
libmediautils \
libnbaio \
libnblog \
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 8f181a4..468676a 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -488,6 +488,8 @@
sp<IBinder> binder = IInterface::asBinder(mPowerManager);
binder->unlinkToDeath(mDeathRecipient);
}
+
+ sendStatistics(true /* force */);
}
status_t AudioFlinger::ThreadBase::readyToRun()
@@ -571,6 +573,15 @@
// sendIoConfigEvent_l() must be called with ThreadBase::mLock held
void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event event, pid_t pid)
{
+ // The audio statistics history is exponentially weighted to forget events
+ // about five or more seconds in the past. In order to have
+ // crisper statistics for mediametrics, we reset the statistics on
+ // an IoConfigEvent, to reflect different properties for a new device.
+ mIoJitterMs.reset();
+ mLatencyMs.reset();
+ mProcessTimeMs.reset();
+ mTimestampVerifier.discontinuity();
+
sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, pid);
sendConfigEvent_l(configEvent);
}
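
mIoJitterMs, mProcessTimeMs and mLatencyMs are audio_utils::Statistics<double> accumulators with alpha = 0.995, i.e. exponentially weighted with an effective memory of roughly 1/(1 - alpha) = 200 samples, which is why resetting them on an IoConfigEvent gives the new device a clean window. A sketch of one way to build such an accumulator (a normalized weighted-sum formulation, not the audio_utils implementation):

    #include <cmath>
    #include <cstdio>

    // Exponentially weighted mean/stddev: older samples decay by alpha per new sample.
    class EwStats {
    public:
        explicit EwStats(double alpha) : mAlpha(alpha) {}
        void add(double x) {
            mW  = mAlpha * mW  + 1.0;
            mS1 = mAlpha * mS1 + x;
            mS2 = mAlpha * mS2 + x * x;
            ++mN;
        }
        void reset() { mW = mS1 = mS2 = 0.0; mN = 0; }   // as done on an IoConfigEvent
        long long getN() const { return mN; }
        double getMean() const { return mN > 0 ? mS1 / mW : 0.0; }
        double getStdDev() const {
            if (mN == 0) return 0.0;
            const double mean = mS1 / mW;
            const double var = mS2 / mW - mean * mean;
            return var > 0.0 ? std::sqrt(var) : 0.0;
        }
    private:
        const double mAlpha;
        double mW = 0.0, mS1 = 0.0, mS2 = 0.0;
        long long mN = 0;
    };

    int main() {
        EwStats latencyMs(0.995);
        for (int i = 0; i < 1000; ++i) latencyMs.add(20.0 + (i % 5));   // fake latencies
        printf("n=%lld mean=%.2f std=%.2f\n",
               latencyMs.getN(), latencyMs.getMean(), latencyMs.getStdDev());
        return 0;
    }
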
@@ -1651,6 +1662,65 @@
mWaitWorkCV.broadcast();
}
+// Call only from threadLoop() or when it is idle.
+// Do not call from high performance code as this may do binder rpc to the MediaMetrics service.
+void AudioFlinger::ThreadBase::sendStatistics(bool force)
+{
+ // Do not log if we have no stats.
+ // We choose the timestamp verifier because it is the most likely item to be present.
+ const int64_t nstats = mTimestampVerifier.getN() - mLastRecordedTimestampVerifierN;
+ if (nstats == 0) {
+ return;
+ }
+
+ // Don't log more frequently than once per 12 hours.
+ // We use BOOTTIME to include suspend time.
+ const int64_t timeNs = systemTime(SYSTEM_TIME_BOOTTIME);
+ const int64_t sinceNs = timeNs - mLastRecordedTimeNs; // ok if mLastRecordedTimeNs = 0
+ if (!force && sinceNs <= 12 * NANOS_PER_HOUR) {
+ return;
+ }
+
+ mLastRecordedTimestampVerifierN = mTimestampVerifier.getN();
+ mLastRecordedTimeNs = timeNs;
+
+ std::unique_ptr<MediaAnalyticsItem> item(MediaAnalyticsItem::create("audiothread"));
+
+#define MM_PREFIX "android.media.audiothread." // avoid cut-n-paste errors.
+
+ // thread configuration
+ item->setInt32(MM_PREFIX "id", (int32_t)mId); // IO handle
+ // item->setInt32(MM_PREFIX "portId", (int32_t)mPortId);
+ item->setCString(MM_PREFIX "type", threadTypeToString(mType));
+ item->setInt32(MM_PREFIX "sampleRate", (int32_t)mSampleRate);
+ item->setInt64(MM_PREFIX "channelMask", (int64_t)mChannelMask);
+ item->setCString(MM_PREFIX "encoding", toString(mFormat).c_str());
+ item->setInt32(MM_PREFIX "frameCount", (int32_t)mFrameCount);
+ item->setCString(MM_PREFIX "outDevice", toString(mOutDevice).c_str());
+ item->setCString(MM_PREFIX "inDevice", toString(mInDevice).c_str());
+
+ // thread statistics
+ if (mIoJitterMs.getN() > 0) {
+ item->setDouble(MM_PREFIX "ioJitterMs.mean", mIoJitterMs.getMean());
+ item->setDouble(MM_PREFIX "ioJitterMs.std", mIoJitterMs.getStdDev());
+ }
+ if (mProcessTimeMs.getN() > 0) {
+ item->setDouble(MM_PREFIX "processTimeMs.mean", mProcessTimeMs.getMean());
+ item->setDouble(MM_PREFIX "processTimeMs.std", mProcessTimeMs.getStdDev());
+ }
+ const auto tsjitter = mTimestampVerifier.getJitterMs();
+ if (tsjitter.getN() > 0) {
+ item->setDouble(MM_PREFIX "timestampJitterMs.mean", tsjitter.getMean());
+ item->setDouble(MM_PREFIX "timestampJitterMs.std", tsjitter.getStdDev());
+ }
+ if (mLatencyMs.getN() > 0) {
+ item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
+ item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
+ }
+
+ item->selfrecord();
+}
+
// ----------------------------------------------------------------------------
// Playback
// ----------------------------------------------------------------------------
@@ -3447,6 +3517,7 @@
LOG_AUDIO_STATE();
}
mStandby = true;
+ sendStatistics(false /* force */);
}
if (mActiveTracks.isEmpty() && mConfigEvents.isEmpty()) {
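
sendStatistics() is gated twice: it returns early when the timestamp verifier has produced nothing new, and unless force is set (as on thread teardown above) it records at most once per 12 hours of BOOTTIME. A compact sketch of that gating; the boot clock is passed in and printf stands in for MediaAnalyticsItem::selfrecord():

    #include <cstdint>
    #include <cstdio>

    constexpr int64_t NANOS_PER_HOUR = 3600LL * 1000 * 1000 * 1000;

    struct StatsSender {
        int64_t lastRecordedN = 0;
        int64_t lastRecordedTimeNs = 0;

        void maybeSend(bool force, int64_t currentN, int64_t nowBootNs) {
            if (currentN - lastRecordedN == 0) return;               // no new stats, nothing to log
            const int64_t sinceNs = nowBootNs - lastRecordedTimeNs;  // ok if lastRecordedTimeNs == 0
            if (!force && sinceNs <= 12 * NANOS_PER_HOUR) return;    // throttle
            lastRecordedN = currentN;
            lastRecordedTimeNs = nowBootNs;
            printf("recording after %lld samples\n", (long long)currentN);  // stand-in for selfrecord()
        }
    };

    int main() {
        StatsSender sender;
        sender.maybeSend(false, 100, 1 * NANOS_PER_HOUR);    // throttled: only 1h since "boot"
        sender.maybeSend(false, 200, 13 * NANOS_PER_HOUR);   // recorded: more than 12h elapsed
        sender.maybeSend(true, 200, 14 * NANOS_PER_HOUR);    // forced, but no new stats: skipped
        sender.maybeSend(true, 250, 14 * NANOS_PER_HOUR);    // forced flush with new stats: recorded
        return 0;
    }
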
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 1afea08..97aa9f0 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -399,6 +399,9 @@
virtual void dump(int fd, const Vector<String16>& args) = 0;
+ // deliver stats to mediametrics.
+ void sendStatistics(bool force);
+
mutable Mutex mLock;
protected:
@@ -522,6 +525,10 @@
audio_utils::Statistics<double> mProcessTimeMs{0.995 /* alpha */};
audio_utils::Statistics<double> mLatencyMs{0.995 /* alpha */};
+ // Save the last count when we delivered statistics to mediametrics.
+ int64_t mLastRecordedTimestampVerifierN = 0;
+ int64_t mLastRecordedTimeNs = 0; // BOOTTIME to include suspend.
+
bool mIsMsdDevice = false;
// A condition that must be evaluated by the thread loop has changed and
// we must not wait for async write callback in the thread loop before evaluating it