Merge "audiopolicy: Brush up DeviceVector::getDevice... methods"
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
index ad1ccbc..3035c5a 100644
--- a/drm/libmediadrm/CryptoHal.cpp
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -269,11 +269,15 @@
* TODO: Add a releaseSharedBuffer method in a future DRM HAL
* API version to make this explicit.
*/
- uint32_t bufferId = mHeapBases.valueFor(seqNum).getBufferId();
- Return<void> hResult = mPlugin->setSharedBufferBase(hidl_memory(), bufferId);
- ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
-
- mHeapBases.removeItem(seqNum);
+ ssize_t index = mHeapBases.indexOfKey(seqNum);
+ if (index >= 0) {
+ if (mPlugin != NULL) {
+ uint32_t bufferId = mHeapBases[index].getBufferId();
+ Return<void> hResult = mPlugin->setSharedBufferBase(hidl_memory(), bufferId);
+ ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
+ }
+ mHeapBases.removeItem(seqNum);
+ }
}
status_t CryptoHal::toSharedBuffer(const sp<IMemory>& memory, int32_t seqNum, ::SharedBuffer* buffer) {
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index ca119d5..518cc63 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -538,6 +538,10 @@
mTimestampMutator.push(timestamp);
}
+ virtual ExtendedTimestamp getTimestamp() const {
+ return mTimestampMutator.last();
+ }
+
// Flushes the shared ring buffer if the client had requested it using mStreaming.mFlush.
// If flush occurs then:
// cblk->u.mStreaming.mFront, ServerProxy::mFlush and ServerProxy::mFlushed will be modified
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index ce5ca63..1a56edc 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -111,6 +111,8 @@
int32_t mCryptoMode; // passed in from extractor
int32_t mDefaultIVSize; // passed in from extractor
uint8_t mCryptoKey[16]; // passed in from extractor
+ int32_t mDefaultEncryptedByteBlock;
+ int32_t mDefaultSkipByteBlock;
uint32_t mCurrentAuxInfoType;
uint32_t mCurrentAuxInfoTypeParameter;
int32_t mCurrentDefaultSampleInfoSize;
@@ -144,6 +146,8 @@
status_t parseTrackFragmentRun(off64_t offset, off64_t size);
status_t parseSampleAuxiliaryInformationSizes(off64_t offset, off64_t size);
status_t parseSampleAuxiliaryInformationOffsets(off64_t offset, off64_t size);
+ status_t parseClearEncryptedSizes(off64_t offset, bool isSubsampleEncryption, uint32_t flags);
+ status_t parseSampleEncryption(off64_t offset);
struct TrackFragmentHeaderInfo {
enum Flags {
@@ -921,6 +925,7 @@
track->timescale = 0;
track->meta.setCString(kKeyMIMEType, "application/octet-stream");
track->has_elst = false;
+ track->subsample_encryption = false;
}
off64_t stop_offset = *offset + chunk_size;
@@ -980,6 +985,49 @@
break;
}
+ case FOURCC('s', 'c', 'h', 'm'):
+ {
+
+ *offset += chunk_size;
+ if (!mLastTrack) {
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t scheme_type;
+ if (mDataSource->readAt(data_offset + 4, &scheme_type, 4) < 4) {
+ return ERROR_IO;
+ }
+ scheme_type = ntohl(scheme_type);
+ int32_t mode = kCryptoModeUnencrypted;
+ switch(scheme_type) {
+ case FOURCC('c', 'b', 'c', '1'):
+ {
+ mode = kCryptoModeAesCbc;
+ break;
+ }
+ case FOURCC('c', 'b', 'c', 's'):
+ {
+ mode = kCryptoModeAesCbc;
+ mLastTrack->subsample_encryption = true;
+ break;
+ }
+ case FOURCC('c', 'e', 'n', 'c'):
+ {
+ mode = kCryptoModeAesCtr;
+ break;
+ }
+ case FOURCC('c', 'e', 'n', 's'):
+ {
+ mode = kCryptoModeAesCtr;
+ mLastTrack->subsample_encryption = true;
+ break;
+ }
+ }
+ mLastTrack->meta.setInt32(kKeyCryptoMode, mode);
+ break;
+ }
+
+
case FOURCC('e', 'l', 's', 't'):
{
*offset += chunk_size;
@@ -1071,31 +1119,54 @@
// tenc box contains 1 byte version, 3 byte flags, 3 byte default algorithm id, one byte
// default IV size, 16 bytes default KeyID
// (ISO 23001-7)
- char buf[4];
+
+ uint8_t version;
+ if (mDataSource->readAt(data_offset, &version, sizeof(version))
+ < (ssize_t)sizeof(version)) {
+ return ERROR_IO;
+ }
+
+ uint8_t buf[4];
memset(buf, 0, 4);
if (mDataSource->readAt(data_offset + 4, buf + 1, 3) < 3) {
return ERROR_IO;
}
- uint32_t defaultAlgorithmId = ntohl(*((int32_t*)buf));
- if (defaultAlgorithmId > 1) {
- // only 0 (clear) and 1 (AES-128) are valid
+
+ if (mLastTrack == NULL) {
return ERROR_MALFORMED;
}
+ uint8_t defaultEncryptedByteBlock = 0;
+ uint8_t defaultSkipByteBlock = 0;
+ uint32_t defaultAlgorithmId = ntohl(*((int32_t*)buf));
+ if (version == 1) {
+ uint32_t pattern = buf[2];
+ defaultEncryptedByteBlock = pattern >> 4;
+ defaultSkipByteBlock = pattern & 0xf;
+ if (defaultEncryptedByteBlock == 0 && defaultSkipByteBlock == 0) {
+ // use (1,0) to mean "encrypt everything"
+ defaultEncryptedByteBlock = 1;
+ }
+ } else if (mLastTrack->subsample_encryption) {
+ ALOGW("subsample_encryption should be version 1");
+ } else if (defaultAlgorithmId > 1) {
+ // only 0 (clear) and 1 (AES-128) are valid
+ ALOGW("defaultAlgorithmId: %u is a reserved value", defaultAlgorithmId);
+ defaultAlgorithmId = 1;
+ }
+
memset(buf, 0, 4);
if (mDataSource->readAt(data_offset + 7, buf + 3, 1) < 1) {
return ERROR_IO;
}
uint32_t defaultIVSize = ntohl(*((int32_t*)buf));
- if ((defaultAlgorithmId == 0 && defaultIVSize != 0) ||
- (defaultAlgorithmId != 0 && defaultIVSize == 0)) {
+ if (defaultAlgorithmId == 0 && defaultIVSize != 0) {
// only unencrypted data must have 0 IV size
return ERROR_MALFORMED;
} else if (defaultIVSize != 0 &&
defaultIVSize != 8 &&
defaultIVSize != 16) {
- // only supported sizes are 0, 8 and 16
return ERROR_MALFORMED;
}
@@ -1105,12 +1176,41 @@
return ERROR_IO;
}
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
+ sp<ABuffer> defaultConstantIv;
+ if (defaultAlgorithmId != 0 && defaultIVSize == 0) {
- mLastTrack->meta.setInt32(kKeyCryptoMode, defaultAlgorithmId);
+ uint8_t ivlength;
+ if (mDataSource->readAt(data_offset + 24, &ivlength, sizeof(ivlength))
+ < (ssize_t)sizeof(ivlength)) {
+ return ERROR_IO;
+ }
+
+ if (ivlength != 8 && ivlength != 16) {
+ ALOGW("unsupported IV length: %u", ivlength);
+ return ERROR_MALFORMED;
+ }
+
+ defaultConstantIv = new ABuffer(ivlength);
+ if (mDataSource->readAt(data_offset + 25, defaultConstantIv->data(), ivlength)
+ < (ssize_t)ivlength) {
+ return ERROR_IO;
+ }
+
+ defaultConstantIv->setRange(0, ivlength);
+ }
+
+ int32_t tmpAlgorithmId;
+ if (!mLastTrack->meta.findInt32(kKeyCryptoMode, &tmpAlgorithmId)) {
+ mLastTrack->meta.setInt32(kKeyCryptoMode, defaultAlgorithmId);
+ }
+
mLastTrack->meta.setInt32(kKeyCryptoDefaultIVSize, defaultIVSize);
mLastTrack->meta.setData(kKeyCryptoKey, 'tenc', defaultKeyId, 16);
+ mLastTrack->meta.setInt32(kKeyEncryptedByteBlock, defaultEncryptedByteBlock);
+ mLastTrack->meta.setInt32(kKeySkipByteBlock, defaultSkipByteBlock);
+ if (defaultConstantIv != NULL) {
+ mLastTrack->meta.setData(kKeyCryptoIV, 'dciv', defaultConstantIv->data(), defaultConstantIv->size());
+ }
break;
}
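For reference, a minimal sketch (not part of this change) of how the version 1 'tenc' pattern byte decoded above works: the high nibble carries the encrypted byte-block count and the low nibble the skip (clear) byte-block count, so the commonly used cbcs "1 encrypted : 9 clear" pattern arrives as 0x19.

    // Illustrative values only; this mirrors the decoding in the tenc handler above.
    uint8_t pattern = 0x19;                     // hypothetical cbcs 1:9 pattern byte
    uint8_t encryptedByteBlock = pattern >> 4;  // -> 1
    uint8_t skipByteBlock = pattern & 0xf;      // -> 9
    // A (0, 0) pattern means "encrypt everything"; the parser above maps it to (1, 0).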
@@ -1610,7 +1710,10 @@
const char *mime;
CHECK(mLastTrack->meta.findCString(kKeyMIMEType, &mime));
- if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
+ if (!strncmp(mime, "audio/", 6)) {
+ // for audio, use 128KB
+ max_size = 1024 * 128;
+ } else if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
|| !strcmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
// AVC & HEVC requires compression ratio of at least 2, and uses
// macroblocks
@@ -3744,6 +3847,8 @@
mCurrentMoofOffset(firstMoofOffset),
mNextMoofOffset(-1),
mCurrentTime(0),
+ mDefaultEncryptedByteBlock(0),
+ mDefaultSkipByteBlock(0),
mCurrentSampleInfoAllocSize(0),
mCurrentSampleInfoSizes(NULL),
mCurrentSampleInfoOffsetsAllocSize(0),
@@ -3773,6 +3878,9 @@
memcpy(mCryptoKey, key, keysize);
}
+ mFormat.findInt32(kKeyEncryptedByteBlock, &mDefaultEncryptedByteBlock);
+ mFormat.findInt32(kKeySkipByteBlock, &mDefaultSkipByteBlock);
+
const char *mime;
bool success = mFormat.findCString(kKeyMIMEType, &mime);
CHECK(success);
@@ -3858,9 +3966,10 @@
}
// Allow up to kMaxBuffers, but not if the total exceeds kMaxBufferSize.
+ const size_t kInitialBuffers = 2;
const size_t kMaxBuffers = 8;
- const size_t buffers = min(kMaxBufferSize / max_size, kMaxBuffers);
- mGroup = new MediaBufferGroup(buffers, max_size);
+ const size_t realMaxBuffers = min(kMaxBufferSize / max_size, kMaxBuffers);
+ mGroup = new MediaBufferGroup(kInitialBuffers, max_size, realMaxBuffers);
mSrcBuffer = new (std::nothrow) uint8_t[max_size];
if (mSrcBuffer == NULL) {
// file probably specified a bad max size
@@ -4018,6 +4127,15 @@
break;
}
+ case FOURCC('s', 'e', 'n', 'c'): {
+ status_t err;
+ if ((err = parseSampleEncryption(data_offset)) != OK) {
+ return err;
+ }
+ *offset += chunk_size;
+ break;
+ }
+
case FOURCC('m', 'd', 'a', 't'): {
// parse DRM info if present
ALOGV("MPEG4Source::parseChunk mdat");
@@ -4168,6 +4286,12 @@
off64_t drmoffset = mCurrentSampleInfoOffsets[0]; // from moof
drmoffset += mCurrentMoofOffset;
+
+ return parseClearEncryptedSizes(drmoffset, false, 0);
+}
+
+status_t MPEG4Source::parseClearEncryptedSizes(off64_t offset, bool isSubsampleEncryption, uint32_t flags) {
+
int ivlength;
CHECK(mFormat.findInt32(kKeyCryptoDefaultIVSize, &ivlength));
@@ -4176,42 +4300,61 @@
ALOGW("unsupported IV length: %d", ivlength);
return ERROR_MALFORMED;
}
+
+ uint32_t sampleCount = mCurrentSampleInfoCount;
+ if (isSubsampleEncryption) {
+ if (!mDataSource->getUInt32(offset, &sampleCount)) {
+ return ERROR_IO;
+ }
+ offset += 4;
+ }
+
// read CencSampleAuxiliaryDataFormats
- for (size_t i = 0; i < mCurrentSampleInfoCount; i++) {
+ for (size_t i = 0; i < sampleCount; i++) {
if (i >= mCurrentSamples.size()) {
ALOGW("too few samples");
break;
}
Sample *smpl = &mCurrentSamples.editItemAt(i);
+ if (!smpl->clearsizes.isEmpty()) {
+ continue;
+ }
memset(smpl->iv, 0, 16);
- if (mDataSource->readAt(drmoffset, smpl->iv, ivlength) != ivlength) {
+ if (mDataSource->readAt(offset, smpl->iv, ivlength) != ivlength) {
return ERROR_IO;
}
- drmoffset += ivlength;
+ offset += ivlength;
- int32_t smplinfosize = mCurrentDefaultSampleInfoSize;
- if (smplinfosize == 0) {
- smplinfosize = mCurrentSampleInfoSizes[i];
+ bool readSubsamples;
+ if (isSubsampleEncryption) {
+ readSubsamples = flags & 2;
+ } else {
+ int32_t smplinfosize = mCurrentDefaultSampleInfoSize;
+ if (smplinfosize == 0) {
+ smplinfosize = mCurrentSampleInfoSizes[i];
+ }
+ readSubsamples = smplinfosize > ivlength;
}
- if (smplinfosize > ivlength) {
+
+ if (readSubsamples) {
uint16_t numsubsamples;
- if (!mDataSource->getUInt16(drmoffset, &numsubsamples)) {
+ if (!mDataSource->getUInt16(offset, &numsubsamples)) {
return ERROR_IO;
}
- drmoffset += 2;
+ offset += 2;
for (size_t j = 0; j < numsubsamples; j++) {
uint16_t numclear;
uint32_t numencrypted;
- if (!mDataSource->getUInt16(drmoffset, &numclear)) {
+ if (!mDataSource->getUInt16(offset, &numclear)) {
return ERROR_IO;
}
- drmoffset += 2;
- if (!mDataSource->getUInt32(drmoffset, &numencrypted)) {
+ offset += 2;
+ if (!mDataSource->getUInt32(offset, &numencrypted)) {
return ERROR_IO;
}
- drmoffset += 4;
+ offset += 4;
smpl->clearsizes.add(numclear);
smpl->encryptedsizes.add(numencrypted);
}
@@ -4221,10 +4364,17 @@
}
}
-
return OK;
}
+status_t MPEG4Source::parseSampleEncryption(off64_t offset) {
+ uint32_t flags;
+ if (!mDataSource->getUInt32(offset, &flags)) { // actually version + flags
+ return ERROR_MALFORMED;
+ }
+ return parseClearEncryptedSizes(offset + 4, true, flags);
+}
+
status_t MPEG4Source::parseTrackFragmentHeader(off64_t offset, off64_t size) {
if (size < 8) {
@@ -4476,6 +4626,7 @@
tmp.size = sampleSize;
tmp.duration = sampleDuration;
tmp.compositionOffset = sampleCtsOffset;
+ memset(tmp.iv, 0, sizeof(tmp.iv));
mCurrentSamples.add(tmp);
dataOffset += sampleSize;
@@ -4980,10 +5131,22 @@
smpl->clearsizes.array(), smpl->clearsizes.size() * 4);
bufmeta.setData(kKeyEncryptedSizes, 0,
smpl->encryptedsizes.array(), smpl->encryptedsizes.size() * 4);
- bufmeta.setData(kKeyCryptoIV, 0, smpl->iv, 16); // use 16 or the actual size?
bufmeta.setInt32(kKeyCryptoDefaultIVSize, mDefaultIVSize);
bufmeta.setInt32(kKeyCryptoMode, mCryptoMode);
bufmeta.setData(kKeyCryptoKey, 0, mCryptoKey, 16);
+ bufmeta.setInt32(kKeyEncryptedByteBlock, mDefaultEncryptedByteBlock);
+ bufmeta.setInt32(kKeySkipByteBlock, mDefaultSkipByteBlock);
+
+ uint32_t type = 0;
+ const void *iv = NULL;
+ size_t ivlength = 0;
+ if (!mFormat.findData(
+ kKeyCryptoIV, &type, &iv, &ivlength)) {
+ iv = smpl->iv;
+ ivlength = 16; // use 16 or the actual size?
+ }
+ bufmeta.setData(kKeyCryptoIV, 0, iv, ivlength);
+
}
if ((!mIsAVC && !mIsHEVC)|| mWantsNALFragments) {
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index 831f120..3ea0963 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -85,6 +85,7 @@
bool has_elst;
int64_t elst_media_time;
uint64_t elst_segment_duration;
+ bool subsample_encryption;
};
Vector<SidxEntry> mSidxEntries;
diff --git a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
index f618d3d..ef9a753 100644
--- a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
+++ b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
@@ -41,12 +41,16 @@
#define MAX_ZEROTH_PARTIAL_BINS 40
constexpr double MAX_ECHO_GAIN = 10.0; // based on experiments, otherwise autocorrelation too noisy
+// A narrow impulse seems to have better immunity against overestimating the
+// latency when the auto-correlator detects subharmonics.
static const float s_Impulse[] = {
- 0.0f, 0.0f, 0.0f, 0.0f, 0.2f, // silence on each side of the impulse
- 0.5f, 0.9999f, 0.0f, -0.9999, -0.5f, // bipolar
- -0.2f, 0.0f, 0.0f, 0.0f, 0.0f
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.3f, // silence on each side of the impulse
+ 0.99f, 0.0f, -0.99f, // bipolar with one zero crossing in middle
+ -0.3f, 0.0f, 0.0f, 0.0f, 0.0f
};
+constexpr int32_t kImpulseSizeInFrames = (int32_t)(sizeof(s_Impulse) / sizeof(s_Impulse[0]));
+
class PseudoRandom {
public:
PseudoRandom() {}
@@ -498,13 +502,23 @@
printf("st = %d, echo gain = %f ", mState, mEchoGain);
}
- static void sendImpulse(float *outputData, int outputChannelCount) {
- for (float sample : s_Impulse) {
+ void sendImpulses(float *outputData, int outputChannelCount, int numFrames) {
+ while (numFrames-- > 0) {
+ float sample = s_Impulse[mSampleIndex++];
+ if (mSampleIndex >= kImpulseSizeInFrames) {
+ mSampleIndex = 0;
+ }
+
*outputData = sample;
outputData += outputChannelCount;
}
}
+ void sendOneImpulse(float *outputData, int outputChannelCount) {
+ mSampleIndex = 0;
+ sendImpulses(outputData, outputChannelCount, kImpulseSizeInFrames);
+ }
+
void process(float *inputData, int inputChannelCount,
float *outputData, int outputChannelCount,
int numFrames) override {
@@ -530,7 +544,7 @@
break;
case STATE_MEASURING_GAIN:
- sendImpulse(outputData, outputChannelCount);
+ sendImpulses(outputData, outputChannelCount, numFrames);
peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
// If we get several in a row then go to next state.
if (peak > mPulseThreshold) {
@@ -548,7 +562,7 @@
nextState = STATE_WAITING_FOR_SILENCE;
}
}
- } else {
+ } else if (numFrames > kImpulseSizeInFrames) { // ignore short callbacks
mDownCounter = 8;
}
break;
@@ -574,7 +588,7 @@
case STATE_SENDING_PULSE:
mAudioRecording.write(inputData, inputChannelCount, numFrames);
- sendImpulse(outputData, outputChannelCount);
+ sendOneImpulse(outputData, outputChannelCount);
nextState = STATE_GATHERING_ECHOS;
//printf("%5d: switch to STATE_GATHERING_ECHOS\n", mLoopCounter);
break;
@@ -634,8 +648,9 @@
STATE_FAILED
};
- int mDownCounter = 500;
- int mLoopCounter = 0;
+ int32_t mDownCounter = 500;
+ int32_t mLoopCounter = 0;
+ int32_t mSampleIndex = 0;
float mPulseThreshold = 0.02f;
float mSilenceThreshold = 0.002f;
float mMeasuredLoopGain = 0.0f;
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 26d1e4b..91ebf73 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -42,7 +42,7 @@
#define NUM_INPUT_CHANNELS 1
#define FILENAME_ALL "/data/loopback_all.wav"
#define FILENAME_ECHOS "/data/loopback_echos.wav"
-#define APP_VERSION "0.2.03"
+#define APP_VERSION "0.2.04"
constexpr int kNumCallbacksToDrain = 20;
constexpr int kNumCallbacksToDiscard = 20;
@@ -98,6 +98,9 @@
framesRead = AAudioStream_read(myData->inputStream, myData->inputFloatData,
numFrames,
0 /* timeoutNanoseconds */);
+ } else {
+ printf("ERROR actualInputFormat = %d\n", myData->actualInputFormat);
+ assert(false);
}
if (framesRead < 0) {
myData->inputError = framesRead;
@@ -121,7 +124,7 @@
float *outputData = (float *) audioData;
// Read audio data from the input stream.
- int32_t framesRead;
+ int32_t actualFramesRead;
if (numFrames > myData->inputFramesMaximum) {
myData->inputError = AAUDIO_ERROR_OUT_OF_RANGE;
@@ -141,16 +144,23 @@
if (myData->numCallbacksToDrain > 0) {
// Drain the input.
+ int32_t totalFramesRead = 0;
do {
- framesRead = readFormattedData(myData, numFrames);
+ actualFramesRead = readFormattedData(myData, numFrames);
+ if (actualFramesRead > 0) {
+ totalFramesRead += actualFramesRead;
+ }
// Ignore errors because input stream may not be started yet.
- } while (framesRead > 0);
- myData->numCallbacksToDrain--;
+ } while (actualFramesRead > 0);
+ // Only counts if we actually got some data.
+ if (totalFramesRead > 0) {
+ myData->numCallbacksToDrain--;
+ }
} else if (myData->numCallbacksToDiscard > 0) {
// Ignore. Allow the input to fill back up to equilibrium with the output.
- framesRead = readFormattedData(myData, numFrames);
- if (framesRead < 0) {
+ actualFramesRead = readFormattedData(myData, numFrames);
+ if (actualFramesRead < 0) {
result = AAUDIO_CALLBACK_RESULT_STOP;
}
myData->numCallbacksToDiscard--;
@@ -164,21 +174,29 @@
int64_t inputFramesWritten = AAudioStream_getFramesWritten(myData->inputStream);
int64_t inputFramesRead = AAudioStream_getFramesRead(myData->inputStream);
int64_t framesAvailable = inputFramesWritten - inputFramesRead;
- framesRead = readFormattedData(myData, numFrames);
- if (framesRead < 0) {
+ actualFramesRead = readFormattedData(myData, numFrames);
+ if (actualFramesRead < 0) {
result = AAUDIO_CALLBACK_RESULT_STOP;
} else {
- if (framesRead < numFrames) {
- if(framesRead < (int32_t) framesAvailable) {
- printf("insufficient but numFrames = %d, framesRead = %d, available = %d\n",
- numFrames, framesRead, (int) framesAvailable);
+ if (actualFramesRead < numFrames) {
+ if(actualFramesRead < (int32_t) framesAvailable) {
+ printf("insufficient but numFrames = %d"
+ ", actualFramesRead = %d"
+ ", inputFramesWritten = %d"
+ ", inputFramesRead = %d"
+ ", available = %d\n",
+ numFrames,
+ actualFramesRead,
+ (int) inputFramesWritten,
+ (int) inputFramesRead,
+ (int) framesAvailable);
}
myData->insufficientReadCount++;
- myData->insufficientReadFrames += numFrames - framesRead; // deficit
+ myData->insufficientReadFrames += numFrames - actualFramesRead; // deficit
}
- int32_t numSamples = framesRead * myData->actualInputChannelCount;
+ int32_t numSamples = actualFramesRead * myData->actualInputChannelCount;
if (myData->actualInputFormat == AAUDIO_FORMAT_PCM_I16) {
convertPcm16ToFloat(myData->inputShortData, myData->inputFloatData, numSamples);
@@ -216,6 +234,7 @@
static void usage() {
printf("Usage: aaudio_loopback [OPTION]...\n\n");
AAudioArgsParser::usage();
+ printf(" -B{frames} input capacity in frames\n");
printf(" -C{channels} number of input channels\n");
printf(" -F{0,1,2} input format, 1=I16, 2=FLOAT\n");
printf(" -g{gain} recirculating loopback gain\n");
@@ -312,16 +331,18 @@
AAudioSimplePlayer player;
AAudioSimpleRecorder recorder;
LoopbackData loopbackData;
- AAudioStream *inputStream = nullptr;
+ AAudioStream *inputStream = nullptr;
AAudioStream *outputStream = nullptr;
aaudio_result_t result = AAUDIO_OK;
aaudio_sharing_mode_t requestedInputSharingMode = AAUDIO_SHARING_MODE_SHARED;
int requestedInputChannelCount = NUM_INPUT_CHANNELS;
aaudio_format_t requestedInputFormat = AAUDIO_FORMAT_UNSPECIFIED;
- const aaudio_format_t requestedOutputFormat = AAUDIO_FORMAT_PCM_FLOAT;
+ int32_t requestedInputCapacity = -1;
aaudio_performance_mode_t inputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ int32_t outputFramesPerBurst = 0;
+
aaudio_format_t actualOutputFormat = AAUDIO_FORMAT_INVALID;
int32_t actualSampleRate = 0;
int written = 0;
@@ -342,6 +363,9 @@
if (arg[0] == '-') {
char option = arg[1];
switch (option) {
+ case 'B':
+ requestedInputCapacity = atoi(&arg[2]);
+ break;
case 'C':
requestedInputChannelCount = atoi(&arg[2]);
break;
@@ -408,7 +432,6 @@
}
printf("OUTPUT stream ----------------------------------------\n");
- argParser.setFormat(requestedOutputFormat);
result = player.open(argParser, MyDataCallbackProc, MyErrorCallbackProc, &loopbackData);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - player.open() returned %d\n", result);
@@ -417,11 +440,15 @@
outputStream = player.getStream();
actualOutputFormat = AAudioStream_getFormat(outputStream);
- assert(actualOutputFormat == AAUDIO_FORMAT_PCM_FLOAT);
+ if (actualOutputFormat != AAUDIO_FORMAT_PCM_FLOAT) {
+ fprintf(stderr, "ERROR - only AAUDIO_FORMAT_PCM_FLOAT supported\n");
+ exit(1);
+ }
actualSampleRate = AAudioStream_getSampleRate(outputStream);
loopbackData.audioRecording.allocate(recordingDuration * actualSampleRate);
loopbackData.audioRecording.setSampleRate(actualSampleRate);
+ outputFramesPerBurst = AAudioStream_getFramesPerBurst(outputStream);
argParser.compareWithStream(outputStream);
@@ -435,8 +462,11 @@
// Make sure the input buffer has plenty of capacity.
// Extra capacity on input should not increase latency if we keep it drained.
- int32_t outputBufferCapacity = AAudioStream_getBufferCapacityInFrames(outputStream);
- int32_t inputBufferCapacity = 2 * outputBufferCapacity;
+ int32_t inputBufferCapacity = requestedInputCapacity;
+ if (inputBufferCapacity < 0) {
+ int32_t outputBufferCapacity = AAudioStream_getBufferCapacityInFrames(outputStream);
+ inputBufferCapacity = 2 * outputBufferCapacity;
+ }
argParser.setBufferCapacity(inputBufferCapacity);
result = recorder.open(argParser);
@@ -457,6 +487,15 @@
argParser.compareWithStream(inputStream);
+ // If the input stream is too small then we cannot satisfy the output callback.
+ {
+ int32_t actualCapacity = AAudioStream_getBufferCapacityInFrames(inputStream);
+ if (actualCapacity < 2 * outputFramesPerBurst) {
+ fprintf(stderr, "ERROR - input capacity < 2 * outputFramesPerBurst\n");
+ goto finish;
+ }
+ }
+
// ------- Setup loopbackData -----------------------------
loopbackData.actualInputFormat = AAudioStream_getFormat(inputStream);
@@ -499,7 +538,7 @@
printf(" ERROR on output stream\n");
break;
} else if (loopbackData.isDone) {
- printf(" test says it is done!\n");
+ printf(" Test says it is DONE!\n");
break;
} else {
// Log a line of stream data.
diff --git a/media/libaaudio/examples/loopback/src/loopback.sh b/media/libaaudio/examples/loopback/src/loopback.sh
index bc63125..a5712b8 100644
--- a/media/libaaudio/examples/loopback/src/loopback.sh
+++ b/media/libaaudio/examples/loopback/src/loopback.sh
@@ -1,10 +1,30 @@
#!/system/bin/sh
# Run a loopback test in the background after a delay.
-# To run the script enter:
+# To run the script, enter these commands once:
+# adb disable-verity
+# adb reboot
+# adb remount
+# adb sync
+# adb push loopback.sh /data/
+# For each test run:
# adb shell "nohup sh /data/loopback.sh &"
+# Quickly connect USB audio if needed, either manually or via Tigertail switch.
+# Wait until the test completes, restore USB to host if needed, and then:
+# adb pull /data/loopreport.txt
+# adb pull /data/loopback_all.wav
+# adb pull /data/loopback_echos.wav
SLEEP_TIME=10
-TEST_COMMAND="aaudio_loopback -pl -Pl -C1 -n2 -m2 -tm -d5"
+TEST_COMMAND="/data/nativetest/aaudio_loopback/aaudio_loopback -pl -Pl -C1 -n2 -m2 -te -d5"
+# Partial list of options:
+# -pl (output) performance mode: low latency
+# -Pl input performance mode: low latency
+# -C1 input channel count: 1
+# -n2 number of bursts: 2
+# -m2 mmap policy: 2
+# -t? test mode: -tm for sine magnitude, -te for echo latency, -tf for file latency
+# -d5 device ID
+# For full list of available options, see AAudioArgsParser.h and loopback.cpp
echo "Plug in USB Mir and Fun Plug."
echo "Test will start in ${SLEEP_TIME} seconds: ${TEST_COMMAND}"
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 2207cb8c..5b29419 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -44,7 +44,15 @@
#define AAUDIO_UNSPECIFIED 0
enum {
+ /**
+ * Audio data will travel out of the device, for example through a speaker.
+ */
AAUDIO_DIRECTION_OUTPUT,
+
+
+ /**
+ * Audio data will travel into the device, for example from a microphone.
+ */
AAUDIO_DIRECTION_INPUT
};
typedef int32_t aaudio_direction_t;
@@ -52,33 +60,112 @@
enum {
AAUDIO_FORMAT_INVALID = -1,
AAUDIO_FORMAT_UNSPECIFIED = 0,
+
+ /**
+ * This format uses the int16_t data type.
+ * The maximum range of the data is -32768 to 32767.
+ */
AAUDIO_FORMAT_PCM_I16,
+
+ /**
+ * This format uses the float data type.
+ * The nominal range of the data is [-1.0f, 1.0f).
+ * Values outside that range may be clipped.
+ *
+ * See also 'floatData' at
+ * https://developer.android.com/reference/android/media/AudioTrack#write(float[],%20int,%20int,%20int)
+ */
AAUDIO_FORMAT_PCM_FLOAT
};
typedef int32_t aaudio_format_t;
+/**
+ * These result codes are returned from AAudio functions to indicate success or failure.
+ * Note that error return codes may change in the future so applications should generally
+ * not rely on specific return codes.
+ */
enum {
+ /**
+ * The call was successful.
+ */
AAUDIO_OK,
AAUDIO_ERROR_BASE = -900, // TODO review
+
+ /**
+ * The audio device was disconnected. This could occur, for example, when headphones
+ * are plugged in or unplugged. The stream cannot be used after the device is disconnected.
+ * Applications should stop and close the stream.
+ * If this error is received in an error callback then another thread should be
+ * used to stop and close the stream.
+ */
AAUDIO_ERROR_DISCONNECTED,
+
+ /**
+ * An invalid parameter was passed to AAudio.
+ */
AAUDIO_ERROR_ILLEGAL_ARGUMENT,
// reserved
AAUDIO_ERROR_INTERNAL = AAUDIO_ERROR_ILLEGAL_ARGUMENT + 2,
+
+ /**
+ * The requested operation is not appropriate for the current state of AAudio.
+ */
AAUDIO_ERROR_INVALID_STATE,
// reserved
// reserved
+ /**
+ * The server rejected the handle used to identify the stream.
+ */
AAUDIO_ERROR_INVALID_HANDLE = AAUDIO_ERROR_INVALID_STATE + 3,
// reserved
+
+ /**
+ * The function is not implemented for this stream.
+ */
AAUDIO_ERROR_UNIMPLEMENTED = AAUDIO_ERROR_INVALID_HANDLE + 2,
+
+ /**
+ * A resource or information is unavailable.
+ * This could occur when an application tries to open too many streams,
+ * or a timestamp is not available.
+ */
AAUDIO_ERROR_UNAVAILABLE,
AAUDIO_ERROR_NO_FREE_HANDLES,
+
+ /**
+ * Memory could not be allocated.
+ */
AAUDIO_ERROR_NO_MEMORY,
+
+ /**
+ * A NULL pointer was passed to AAudio.
+ * Or a NULL pointer was detected internally.
+ */
AAUDIO_ERROR_NULL,
+
+ /**
+ * An operation took longer than expected.
+ */
AAUDIO_ERROR_TIMEOUT,
AAUDIO_ERROR_WOULD_BLOCK,
+
+ /**
+ * The requested data format is not supported.
+ */
AAUDIO_ERROR_INVALID_FORMAT,
+
+ /**
+ * A requested value was out of range.
+ */
AAUDIO_ERROR_OUT_OF_RANGE,
+
+ /**
+ * The audio service was not available.
+ */
AAUDIO_ERROR_NO_SERVICE,
+
+ /**
+ * The requested sample rate was not supported.
+ */
AAUDIO_ERROR_INVALID_RATE
};
typedef int32_t aaudio_result_t;
@@ -126,15 +213,15 @@
AAUDIO_PERFORMANCE_MODE_NONE = 10,
/**
- * Extending battery life is most important.
+ * Extending battery life is more important than low latency.
*
* This mode is not supported in input streams.
- * Mode NONE will be used if this is requested.
+ * For input, mode NONE will be used if this is requested.
*/
AAUDIO_PERFORMANCE_MODE_POWER_SAVING,
/**
- * Reducing latency is most important.
+ * Reducing latency is more important than battery life.
*/
AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
};
@@ -289,6 +376,11 @@
};
typedef int32_t aaudio_input_preset_t;
+/**
+ * These may be used with AAudioStreamBuilder_setSessionId().
+ *
+ * Added in API level 28.
+ */
enum {
/**
* Do not allocate a session ID.
@@ -302,7 +394,7 @@
/**
* Allocate a session ID that can be used to attach and control
* effects using the Java AudioEffects API.
- * Note that the use of this flag may result in higher latency.
+ * Note that using this may result in higher latency.
*
* Note that this matches the value of AudioManager.AUDIO_SESSION_ID_GENERATE.
*
@@ -474,8 +566,14 @@
/**
* Set the requested performance mode.
*
+ * Supported modes are AAUDIO_PERFORMANCE_MODE_NONE, AAUDIO_PERFORMANCE_MODE_POWER_SAVING
+ * and AAUDIO_PERFORMANCE_MODE_LOW_LATENCY.
+ *
* The default, if you do not call this function, is AAUDIO_PERFORMANCE_MODE_NONE.
*
+ * You may not get the mode you requested.
+ * You can call AAudioStream_getPerformanceMode() to find out the final mode for the stream.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param mode the desired performance mode, eg. AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
*/
@@ -550,10 +648,12 @@
* and then used with this function when opening another stream.
* This allows effects to be shared between streams.
*
- * Session IDs from AAudio can be used the Android Java APIs and vice versa.
+ * Session IDs from AAudio can be used with the Android Java APIs and vice versa.
* So a session ID from an AAudio stream can be passed to Java
* and effects applied using the Java AudioEffect API.
*
+ * Note that allocating or setting a session ID may result in a stream with higher latency.
+ *
* Allocated session IDs will always be positive and nonzero.
*
* Added in API level 28.
@@ -612,6 +712,14 @@
* <li>use any mutexes or other synchronization primitives</li>
* <li>sleep</li>
* <li>stop or close the stream</li>
+ * <li>AAudioStream_read()</li>
+ * <li>AAudioStream_write()</li>
+ * </ul>
+ *
+ * The following are OK to call from the data callback:
+ * <ul>
+ * <li>AAudioStream_get*()</li>
+ * <li>AAudio_convertResultToText()</li>
* </ul>
*
* If you need to move data, eg. MIDI commands, in or out of the callback function then
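For illustration only (not part of this change), a minimal sketch of a data callback that stays within these rules; it assumes an AAUDIO_FORMAT_PCM_FLOAT stream, only queries the stream and fills the buffer, and neither blocks nor stops the stream:

    #include <aaudio/AAudio.h>
    #include <cstring>

    static aaudio_data_callback_result_t myDataCallback(
            AAudioStream *stream, void *userData, void *audioData, int32_t numFrames) {
        (void) userData;
        // AAudioStream_get*() queries are allowed from the callback.
        int32_t channelCount = AAudioStream_getChannelCount(stream);
        // Render audio here; this sketch just writes silence (float samples assumed).
        memset(audioData, 0, sizeof(float) * channelCount * numFrames);
        return AAUDIO_CALLBACK_RESULT_CONTINUE;  // never block, sleep or stop the stream here
    }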
@@ -685,6 +793,22 @@
* Prototype for the callback function that is passed to
* AAudioStreamBuilder_setErrorCallback().
*
+ * The following may NOT be called from the error callback:
+ * <ul>
+ * <li>AAudioStream_requestStop()</li>
+ * <li>AAudioStream_requestPause()</li>
+ * <li>AAudioStream_close()</li>
+ * <li>AAudioStream_waitForStateChange()</li>
+ * <li>AAudioStream_read()</li>
+ * <li>AAudioStream_write()</li>
+ * </ul>
+ *
+ * The following are OK to call from the error callback:
+ * <ul>
+ * <li>AAudioStream_get*()</li>
+ * <li>AAudio_convertResultToText()</li>
+ * </ul>
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @param userData the same address that was passed to AAudioStreamBuilder_setErrorCallback()
* @param error an AAUDIO_ERROR_* value.
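For illustration only (not part of this change), a minimal sketch of an error callback that follows these rules; on disconnect it hands the stop and close work to a separate thread instead of calling those functions directly:

    #include <aaudio/AAudio.h>
    #include <thread>

    static void myErrorCallback(AAudioStream *stream, void *userData, aaudio_result_t error) {
        (void) userData;
        if (error == AAUDIO_ERROR_DISCONNECTED) {
            // Stop and close from another thread, never from the callback itself.
            std::thread([stream]() {
                AAudioStream_requestStop(stream);
                AAudioStream_close(stream);
                // An application would typically open a new stream on the new device here.
            }).detach();
        }
    }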
@@ -856,6 +980,8 @@
*
* This call is "strong non-blocking" unless it has to wait for data.
*
+ * If the call times out then zero or a partial frame count will be returned.
+ *
* @param stream A stream created using AAudioStreamBuilder_openStream().
* @param buffer The address of the first sample.
* @param numFrames Number of frames to read. Only complete frames will be written.
@@ -879,6 +1005,8 @@
*
* This call is "strong non-blocking" unless it has to wait for room in the buffer.
*
+ * If the call times out then zero or a partial frame count will be returned.
+ *
* @param stream A stream created using AAudioStreamBuilder_openStream().
* @param buffer The address of the first sample.
* @param numFrames Number of frames to write. Only complete frames will be written.
@@ -903,7 +1031,8 @@
* This cannot be set higher than AAudioStream_getBufferCapacityInFrames().
*
* Note that you will probably not get the exact size you request.
- * Call AAudioStream_getBufferSizeInFrames() to see what the actual final size is.
+ * You can check the return value or call AAudioStream_getBufferSizeInFrames()
+ * to see what the actual final size is.
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @param numFrames requested number of frames that can be filled without blocking
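A short usage sketch of the pattern described above (illustrative only, assuming the stream is already open): request a size, then use the return value, or AAudioStream_getBufferSizeInFrames(), to learn the size actually granted.

    #include <aaudio/AAudio.h>

    static void tuneBufferSize(AAudioStream *stream) {
        int32_t requested = 2 * AAudioStream_getFramesPerBurst(stream);  // double buffering
        aaudio_result_t actual = AAudioStream_setBufferSizeInFrames(stream, requested);
        if (actual < 0) {
            // a negative value is an AAUDIO_ERROR_* code
        } else {
            // "actual" may differ from "requested"
        }
    }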
@@ -1038,7 +1167,8 @@
/**
* Passes back the number of frames that have been written since the stream was created.
- * For an output stream, this will be advanced by the application calling write().
+ * For an output stream, this will be advanced by the application calling write()
+ * or by a data callback.
* For an input stream, this will be advanced by the endpoint.
*
* The frame position is monotonically increasing.
@@ -1051,7 +1181,8 @@
/**
* Passes back the number of frames that have been read since the stream was created.
* For an output stream, this will be advanced by the endpoint.
- * For an input stream, this will be advanced by the application calling read().
+ * For an input stream, this will be advanced by the application calling read()
+ * or by a data callback.
*
* The frame position is monotonically increasing.
*
diff --git a/media/libaudioclient/include/media/AudioTimestamp.h b/media/libaudioclient/include/media/AudioTimestamp.h
index 498de8e..2cd8c87 100644
--- a/media/libaudioclient/include/media/AudioTimestamp.h
+++ b/media/libaudioclient/include/media/AudioTimestamp.h
@@ -135,8 +135,23 @@
return INVALID_OPERATION;
}
+ double getOutputServerLatencyMs(uint32_t sampleRate) const {
+ return getLatencyMs(sampleRate, LOCATION_SERVER, LOCATION_KERNEL);
+ }
+
+ double getLatencyMs(uint32_t sampleRate, Location location1, Location location2) const {
+ if (mTimeNs[location1] > 0 && mTimeNs[location2] > 0) {
+ const int64_t frameDifference =
+ mPosition[location1] - mPosition[location2];
+ const int64_t timeDifferenceNs =
+ mTimeNs[location1] - mTimeNs[location2];
+ return ((double)frameDifference * 1e9 / sampleRate - timeDifferenceNs) * 1e-6;
+ }
+ return 0.;
+ }
+
// convert fields to a printable string
- std::string toString() {
+ std::string toString() const {
std::stringstream ss;
ss << "BOOTTIME offset " << mTimebaseOffset[TIMEBASE_BOOTTIME] << "\n";
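A worked illustration of the getOutputServerLatencyMs()/getLatencyMs() helpers added above (the wrapper below is hypothetical): with the SERVER position 480 frames ahead of the KERNEL position and both timestamps taken at the same instant, a 48 kHz stream reports (480 * 1e9 / 48000 - 0) * 1e-6 = 10 ms; if either timestamp is not yet valid the helper returns 0.

    #include <media/AudioTimestamp.h>

    // Hypothetical helper; any ExtendedTimestamp filled in by the framework works the same way.
    static double outputServerLatencyMs(const android::ExtendedTimestamp &ts, uint32_t sampleRate) {
        return ts.getOutputServerLatencyMs(sampleRate);  // 0.0 until both locations are valid
    }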
diff --git a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
index 6ad2441..dfe34e8 100644
--- a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
+++ b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
@@ -182,6 +182,9 @@
kKeyCASystemID = 'caid', // int32_t
kKeyCASessionID = 'seid', // raw data
+ kKeyEncryptedByteBlock = 'cblk', // uint8_t
+ kKeySkipByteBlock = 'sblk', // uint8_t
+
// Please see MediaFormat.KEY_IS_AUTOSELECT.
kKeyTrackIsAutoselect = 'auto', // bool (int32_t)
// Please see MediaFormat.KEY_IS_DEFAULT.
@@ -231,6 +234,12 @@
kTypeD263 = 'd263',
};
+enum {
+ kCryptoModeUnencrypted = 0,
+ kCryptoModeAesCtr = 1,
+ kCryptoModeAesCbc = 2,
+};
+
class Parcel;
class MetaDataBase {
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 29a219f..1a185ab 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -498,9 +498,10 @@
return ERROR_MALFORMED;
}
- int32_t width, height;
+ int32_t width, height, stride;
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
+ CHECK(outputFormat->findInt32("stride", &stride));
int32_t crop_left, crop_top, crop_right, crop_bottom;
if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
@@ -527,11 +528,10 @@
if (converter.isValid()) {
converter.convert(
(const uint8_t *)videoFrameBuffer->data(),
- width, height,
+ width, height, stride,
crop_left, crop_top, crop_right, crop_bottom,
frame->getFlattenedData(),
- frame->mWidth,
- frame->mHeight,
+ frame->mWidth, frame->mHeight, frame->mRowBytes,
crop_left, crop_top, crop_right, crop_bottom);
return OK;
}
@@ -678,9 +678,10 @@
return ERROR_MALFORMED;
}
- int32_t width, height;
+ int32_t width, height, stride;
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
+ CHECK(outputFormat->findInt32("stride", &stride));
if (mFrame == NULL) {
sp<IMemory> frameMem = allocVideoFrame(
@@ -724,11 +725,10 @@
if (converter.isValid()) {
converter.convert(
(const uint8_t *)videoFrameBuffer->data(),
- width, height,
+ width, height, stride,
crop_left, crop_top, crop_right, crop_bottom,
mFrame->getFlattenedData(),
- mFrame->mWidth,
- mFrame->mHeight,
+ mFrame->mWidth, mFrame->mHeight, mFrame->mRowBytes,
dstLeft, dstTop, dstRight, dstBottom);
return OK;
}
diff --git a/media/libstagefright/codecs/xaacdec/Android.bp b/media/libstagefright/codecs/xaacdec/Android.bp
index 465951b..7392f1e 100644
--- a/media/libstagefright/codecs/xaacdec/Android.bp
+++ b/media/libstagefright/codecs/xaacdec/Android.bp
@@ -1,9 +1,6 @@
cc_library_shared {
name: "libstagefright_soft_xaacdec",
vendor_available: true,
- vndk: {
- enabled: true,
- },
srcs: [
"SoftXAAC.cpp",
@@ -16,6 +13,7 @@
cflags: [
"-Werror",
+ "-DENABLE_MPEG_D_DRC"
],
sanitize: {
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
index 376755b..b3aefa8 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -34,14 +34,28 @@
#define DRC_DEFAULT_MOBILE_DRC_BOOST 127 /* maximum compression of dynamic range for mobile conf */
#define DRC_DEFAULT_MOBILE_DRC_HEAVY 1 /* switch for heavy compression for mobile conf */
#define DRC_DEFAULT_MOBILE_ENC_LEVEL (-1) /* encoder target level; -1 => the value is unknown, otherwise dB step value (e.g. 64 for -16 dB) */
+#define DRC_KEY_AAC_DRC_EFFECT_TYPE (3) /* Default Effect type is "Limited playback" */
+/* REF_LEVEL of 64 pairs well with EFFECT_TYPE of 3. */
+#define DRC_DEFAULT_MOBILE_LOUDNESS_LEVEL (64) /* Default loudness value for MPEG-D DRC */
#define PROP_DRC_OVERRIDE_REF_LEVEL "aac_drc_reference_level"
#define PROP_DRC_OVERRIDE_CUT "aac_drc_cut"
#define PROP_DRC_OVERRIDE_BOOST "aac_drc_boost"
#define PROP_DRC_OVERRIDE_HEAVY "aac_drc_heavy"
#define PROP_DRC_OVERRIDE_ENC_LEVEL "aac_drc_enc_target_level"
+#define PROP_DRC_OVERRIDE_EFFECT_TYPE "ro.aac_drc_effect_type"
+#define PROP_DRC_OVERRIDE_LOUDNESS_LEVEL "aac_drc_loudness_level"
+
#define MAX_CHANNEL_COUNT 8 /* maximum number of audio channels that can be decoded */
+
+#define RETURN_IF_NE(returned, expected, retval, str) \
+ if ( returned != expected ) { \
+ ALOGE("Error in %s: Returned: %d Expected: %d", str, returned, expected); \
+ return retval; \
+ }
+
+
namespace android {
template<class T>
@@ -76,6 +90,7 @@
mCurrentTimestamp(0),
mOutputPortSettingsChange(NONE),
mXheaacCodecHandle(NULL),
+ mMpegDDrcHandle(NULL),
mInputBufferSize(0),
mOutputFrameLength(1024),
mInputBuffer(NULL),
@@ -85,7 +100,10 @@
mPcmWdSz(0),
mChannelMask(0),
mIsCodecInitialized(false),
- mIsCodecConfigFlushRequired(false)
+ mIsCodecConfigFlushRequired(false),
+ mpegd_drc_present(0),
+ drc_flag(0)
+
{
initPorts();
CHECK_EQ(initDecoder(), (status_t)OK);
@@ -145,11 +163,32 @@
status_t SoftXAAC::initDecoder() {
status_t status = UNKNOWN_ERROR;
- unsigned int ui_drc_val;
+ int ui_drc_val;
IA_ERRORCODE err_code = IA_NO_ERROR;
- initXAACDecoder();
- if (NULL == mXheaacCodecHandle) {
- ALOGE("AAC decoder is null. initXAACDecoder Failed");
+ int loop = 0;
+
+ err_code = initXAACDecoder();
+ if (err_code != IA_NO_ERROR) {
+ if (NULL == mXheaacCodecHandle) {
+ ALOGE("AAC decoder handle is null");
+ }
+ if (NULL == mMpegDDrcHandle) {
+ ALOGE("MPEG-D DRC decoder handle is null");
+ }
+ for (loop = 1; loop < mMallocCount; loop++) {
+ if (mMemoryArray[loop] == NULL) {
+ ALOGE("memory allocation error %d\n", loop);
+ break;
+ }
+ }
+ ALOGE("initXAACDecoder Failed");
+
+ for (loop = 0; loop < mMallocCount; loop++) {
+ if (mMemoryArray[loop])
+ free(mMemoryArray[loop]);
+ }
+ mMallocCount = 0;
+ return status;
} else {
status = OK;
}
@@ -174,8 +213,30 @@
IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL,
&ui_drc_val);
- ALOGV("Error code returned after DRC Target level set_config is %d", err_code);
- ALOGV("Setting IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL with value %d", ui_drc_val);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL");
+#ifdef ENABLE_MPEG_D_DRC
+
+ if (property_get(PROP_DRC_OVERRIDE_LOUDNESS_LEVEL, value, NULL))
+ {
+ ui_drc_val = atoi(value);
+ ALOGV("AAC decoder using desired DRC target loudness of %d instead of %d", ui_drc_val,
+ DRC_DEFAULT_MOBILE_LOUDNESS_LEVEL);
+ }
+ else
+ {
+ ui_drc_val= DRC_DEFAULT_MOBILE_LOUDNESS_LEVEL;
+ }
+
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS,
+ &ui_drc_val);
+
+
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS");
+#endif
+
if (property_get(PROP_DRC_OVERRIDE_CUT, value, NULL))
{
@@ -192,8 +253,8 @@
IA_API_CMD_SET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT,
&ui_drc_val);
- ALOGV("Error code returned after DRC cut factor set_config is %d", err_code);
- ALOGV("Setting IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT with value %d", ui_drc_val);
+
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT");
if (property_get(PROP_DRC_OVERRIDE_BOOST, value, NULL))
{
@@ -210,13 +271,12 @@
IA_API_CMD_SET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST,
&ui_drc_val);
- ALOGV("Error code returned after DRC boost factor set_config is %d", err_code);
- ALOGV("Setting DRC_DEFAULT_MOBILE_DRC_BOOST with value %d", ui_drc_val);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST");
- if (property_get(PROP_DRC_OVERRIDE_BOOST, value, NULL))
+ if (property_get(PROP_DRC_OVERRIDE_HEAVY, value, NULL))
{
ui_drc_val = atoi(value);
- ALOGV("AAC decoder using desired DRC boost factor of %d instead of %d", ui_drc_val,
+ ALOGV("AAC decoder using desired Heavy compression factor of %d instead of %d", ui_drc_val,
DRC_DEFAULT_MOBILE_DRC_HEAVY);
}
else
@@ -228,9 +288,28 @@
IA_API_CMD_SET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP,
&ui_drc_val);
- ALOGV("Error code returned after DRC heavy set_config is %d", err_code);
- ALOGV("Setting DRC_DEFAULT_MOBILE_DRC_HEAVY with value %d", ui_drc_val);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP");
+#ifdef ENABLE_MPEG_D_DRC
+ if (property_get(PROP_DRC_OVERRIDE_EFFECT_TYPE, value, NULL))
+ {
+ ui_drc_val = atoi(value);
+ ALOGV("AAC decoder using desired DRC effect type of %d instead of %d", ui_drc_val,
+ DRC_KEY_AAC_DRC_EFFECT_TYPE);
+ }
+ else
+ {
+ ui_drc_val = DRC_KEY_AAC_DRC_EFFECT_TYPE;
+ }
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE,
+ &ui_drc_val);
+
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE");
+
+#endif
return status;
}
@@ -436,10 +515,10 @@
return OMX_ErrorNone;
}
- case OMX_IndexParamAudioAndroidAacPresentation:
+ case OMX_IndexParamAudioAndroidAacDrcPresentation:
{
- const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *aacPresParams =
- (const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *)params;
+ const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *aacPresParams =
+ (const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *)params;
if (!isValidOMXParam(aacPresParams)) {
ALOGE("set OMX_ErrorBadParameter");
@@ -468,7 +547,11 @@
setXAACDRCInfo(aacPresParams->nDrcCut,
aacPresParams->nDrcBoost,
aacPresParams->nTargetReferenceLevel,
- aacPresParams->nHeavyCompression);
+ aacPresParams->nHeavyCompression
+ #ifdef ENABLE_MPEG_D_DRC
+ ,aacPresParams->nDrcEffectType
+ #endif
+ ); // TODO: Revert this change
return OMX_ErrorNone;
}
@@ -774,7 +857,6 @@
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
- ALOGV("out timestamp %lld / %d", outHeader->nTimeStamp, outHeader->nFilledLen);
notifyFillBufferDone(outHeader);
outHeader = NULL;
}
@@ -831,31 +913,33 @@
IA_API_CMD_INIT,
IA_CMD_TYPE_FLUSH_MEM,
NULL);
- ALOGV("Codec initialized:%d",mIsCodecInitialized);
- ALOGV("Error code from first flush %d",err_code);
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_SET_INPUT_BYTES,
0,
&inBufferLength);
-
+
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_INIT,
IA_CMD_TYPE_FLUSH_MEM,
NULL);
-
+
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_INIT,
IA_CMD_TYPE_INIT_DONE_QUERY,
&ui_init_done);
-
- ALOGV("Flush called");
+
if (ui_init_done) {
err_code = getXAACStreamInfo();
ALOGV("Found Codec with below config---\nsampFreq %d\nnumChannels %d\npcmWdSz %d\nchannelMask %d\noutputFrameLength %d",
mSampFreq,mNumChannels,mPcmWdSz,mChannelMask,mOutputFrameLength);
- mIsCodecInitialized = true;
+ if (mNumChannels > MAX_CHANNEL_COUNT) {
+ ALOGE("Number of channels %d exceeds the maximum supported %d\n", mNumChannels, MAX_CHANNEL_COUNT);
+ mIsCodecInitialized = false;
+ } else {
+ mIsCodecInitialized = true;
+ }
}
}
@@ -918,10 +1002,9 @@
/* Get memory information and allocate memory */
/* Memory variables */
- UWORD32 n_mems, ui_rem;
UWORD32 ui_proc_mem_tabs_size;
/* API size */
- UWORD32 pui_ap_isize;
+ UWORD32 pui_api_size;
mInputBufferSize = 0;
mInputBuffer = 0;
@@ -937,12 +1020,14 @@
err_code = ixheaacd_dec_api(NULL,
IA_API_CMD_GET_API_SIZE,
0,
- &pui_ap_isize);
- ALOGV("return code of IA_API_CMD_GET_API_SIZE: %d",err_code);
+ &pui_api_size);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_API_SIZE");
+
/* Allocate memory for API */
- mMemoryArray[mMallocCount] = memalign(4, pui_ap_isize);
+ mMemoryArray[mMallocCount] = memalign(4, pui_api_size);
if (mMemoryArray[mMallocCount] == NULL) {
- ALOGE("malloc for pui_ap_isize + 4 >> %d Failed",pui_ap_isize + 4);
+ ALOGE("malloc for pui_api_size + 4 >> %d Failed",pui_api_size + 4);
+ return IA_FATAL_ERROR;
}
/* Set API object with the memory allocated */
mXheaacCodecHandle =
@@ -954,7 +1039,38 @@
IA_API_CMD_INIT,
IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS,
NULL);
- ALOGV("return code of IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS");
+#ifdef ENABLE_MPEG_D_DRC
+ /* Get the API size */
+ err_code = ia_drc_dec_api(NULL, IA_API_CMD_GET_API_SIZE, 0, &pui_api_size);
+
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_API_SIZE");
+
+ /* Allocate memory for API */
+ mMemoryArray[mMallocCount] = memalign(4, pui_api_size);
+
+ if(mMemoryArray[mMallocCount] == NULL)
+ {
+ ALOGE("malloc for drc api structure Failed");
+ return IA_FATAL_ERROR;
+ }
+ memset(mMemoryArray[mMallocCount],0,pui_api_size);
+
+ /* Set API object with the memory allocated */
+ mMpegDDrcHandle =
+ (pVOID)((WORD8*)mMemoryArray[mMallocCount]);
+ mMallocCount++;
+
+
+ /* Set the config params to default values */
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle,
+ IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS,
+ NULL);
+
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS");
+#endif
/* ******************************************************************/
/* Set config parameters */
@@ -964,7 +1080,7 @@
IA_API_CMD_SET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_ISMP4,
&ui_mp4_flag);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_ISMP4: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_ISMP4");
/* ******************************************************************/
/* Initialize Memory info tables */
@@ -975,10 +1091,11 @@
IA_API_CMD_GET_MEMTABS_SIZE,
0,
&ui_proc_mem_tabs_size);
- ALOGV("return code of IA_API_CMD_GET_MEMTABS_SIZE: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_MEMTABS_SIZE");
mMemoryArray[mMallocCount] = memalign(4, ui_proc_mem_tabs_size);
if (mMemoryArray[mMallocCount] == NULL) {
ALOGE("Malloc for size (ui_proc_mem_tabs_size + 4) = %d failed!",ui_proc_mem_tabs_size + 4);
+ return IA_FATAL_ERROR;
}
/* Set pointer for process memory tables */
@@ -986,7 +1103,7 @@
IA_API_CMD_SET_MEMTABS_PTR,
0,
(pVOID)((WORD8*)mMemoryArray[mMallocCount]));
- ALOGV("return code of IA_API_CMD_SET_MEMTABS_PTR: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_SET_MEMTABS_PTR");
mMallocCount++;
/* initialize the API, post config, fill memory tables */
@@ -994,20 +1111,14 @@
IA_API_CMD_INIT,
IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS,
NULL);
- ALOGV("return code of IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS");
/* ******************************************************************/
/* Allocate Memory with info from library */
/* ******************************************************************/
-
- /* Get number of memory tables required */
- err_code = ixheaacd_dec_api(mXheaacCodecHandle,
- IA_API_CMD_GET_N_MEMTABS,
- 0,
- &n_mems);
- ALOGV("return code of IA_API_CMD_GET_N_MEMTABS: %d",err_code);
-
- for(i = 0; i < (WORD32)n_mems; i++) {
+ /* There are four different types of memories, that needs to be allocated */
+ /* persistent,scratch,input and output */
+ for(i = 0; i < 4; i++) {
int ui_size = 0, ui_alignment = 0, ui_type = 0;
pVOID pv_alloc_ptr;
@@ -1016,26 +1127,27 @@
IA_API_CMD_GET_MEM_INFO_SIZE,
i,
&ui_size);
- ALOGV("return code of IA_API_CMD_GET_MEM_INFO_SIZE: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_MEM_INFO_SIZE");
/* Get memory alignment */
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_GET_MEM_INFO_ALIGNMENT,
i,
&ui_alignment);
- ALOGV("return code of IA_API_CMD_GET_MEM_INFO_ALIGNMENT: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_MEM_INFO_ALIGNMENT");
/* Get memory type */
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_GET_MEM_INFO_TYPE,
i,
&ui_type);
- ALOGV("return code of IA_API_CMD_GET_MEM_INFO_TYPE: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_MEM_INFO_TYPE");
mMemoryArray[mMallocCount] =
memalign(ui_alignment , ui_size);
if (mMemoryArray[mMallocCount] == NULL) {
ALOGE("Malloc for size (ui_size + ui_alignment) = %d failed!",ui_size + ui_alignment);
+ return IA_FATAL_ERROR;
}
pv_alloc_ptr =
(pVOID )((WORD8*)mMemoryArray[mMallocCount]);
@@ -1046,7 +1158,7 @@
IA_API_CMD_SET_MEM_PTR,
i,
pv_alloc_ptr);
- ALOGV("return code of IA_API_CMD_SET_MEM_PTR: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_SET_MEM_PTR");
if (ui_type == IA_MEMTYPE_INPUT) {
mInputBuffer = (pWORD8)pv_alloc_ptr;
mInputBufferSize = ui_size;
@@ -1081,7 +1193,7 @@
IA_API_CMD_SET_INPUT_BYTES,
0,
&inBufferLength);
- ALOGV("return code of IA_API_CMD_SET_INPUT_BYTES: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_SET_INPUT_BYTES");
if (mIsCodecConfigFlushRequired) {
/* If codec is already initialized, then GA header is passed again */
@@ -1091,7 +1203,7 @@
IA_API_CMD_INIT,
IA_CMD_TYPE_GA_HDR,
NULL);
- ALOGV("return code of IA_CMD_TYPE_GA_HDR: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_CMD_TYPE_GA_HDR");
}
else {
/* Initialize the process */
@@ -1099,7 +1211,7 @@
IA_API_CMD_INIT,
IA_CMD_TYPE_INIT_PROCESS,
NULL);
- ALOGV("return code of IA_CMD_TYPE_INIT_PROCESS: %d",err_code);
+ ALOGV("IA_CMD_TYPE_INIT_PROCESS returned error_code = %d",err_code);
}
/* Checking for end of initialization */
@@ -1107,25 +1219,344 @@
IA_API_CMD_INIT,
IA_CMD_TYPE_INIT_DONE_QUERY,
&ui_init_done);
- ALOGV("return code of IA_CMD_TYPE_INIT_DONE_QUERY: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_CMD_TYPE_INIT_DONE_QUERY");
/* How much buffer is used in input buffers */
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_GET_CURIDX_INPUT_BUF,
0,
&i_bytes_consumed);
- ALOGV("return code of IA_API_CMD_GET_CURIDX_INPUT_BUF: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
if(ui_init_done){
err_code = getXAACStreamInfo();
ALOGI("Found Codec with below config---\nsampFreq %d\nnumChannels %d\npcmWdSz %d\nchannelMask %d\noutputFrameLength %d",
mSampFreq,mNumChannels,mPcmWdSz,mChannelMask,mOutputFrameLength);
mIsCodecInitialized = true;
+
+#ifdef ENABLE_MPEG_D_DRC
+ configMPEGDDrc();
+#endif
}
return err_code;
}
+int SoftXAAC::configMPEGDDrc()
+{
+ IA_ERRORCODE err_code = IA_NO_ERROR;
+ int i_effect_type;
+ int i_loud_norm;
+ int i_target_loudness;
+ unsigned int i_sbr_mode;
+ int n_mems;
+ int i;
+#ifdef ENABLE_MPEG_D_DRC
+ {
+
+ /* Sampling Frequency */
+ {
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_SAMP_FREQ, &mSampFreq);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_DRC_DEC_CONFIG_PARAM_SAMP_FREQ");
+ }
+ /* Total Number of Channels */
+ {
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_NUM_CHANNELS, &mNumChannels);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_DRC_DEC_CONFIG_PARAM_NUM_CHANNELS");
+ }
+
+ /* PCM word size */
+ {
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_PCM_WDSZ, &mPcmWdSz);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_DRC_DEC_CONFIG_PARAM_PCM_WDSZ");
+ }
+
+ /*Set Effect Type*/
+
+ {
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE, &i_effect_type);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE");
+
+
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE, &i_effect_type);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE");
+
+ }
+
+/*Set target loudness */
+
+ {
+ err_code = ixheaacd_dec_api(
+ mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS, &i_target_loudness);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS");
+
+
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS, &i_target_loudness);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS");
+
+ }
+
+ /*Set loud_norm_flag*/
+ {
+ err_code = ixheaacd_dec_api(
+ mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM, &i_loud_norm);
+ RETURN_IF_NE(err_code, IA_NO_ERROR , err_code,"IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM");
+
+
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_LOUD_NORM, &i_loud_norm);
+ RETURN_IF_NE(err_code, IA_NO_ERROR , err_code,"IA_DRC_DEC_CONFIG_DRC_LOUD_NORM");
+
+ }
+
+
+
+ err_code = ixheaacd_dec_api(
+ mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE, &i_sbr_mode);
+ RETURN_IF_NE(err_code, IA_NO_ERROR , err_code,"IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
+
+
+ if(i_sbr_mode!=0)
+ {
+ WORD32 frame_length;
+ if (i_sbr_mode==1)
+ {
+ frame_length=2048;
+ }
+ else if(i_sbr_mode==3)
+ {
+ frame_length=4096;
+ }
+ else
+ {
+ frame_length=1024;
+ }
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_FRAME_SIZE, &frame_length);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_DRC_DEC_CONFIG_PARAM_FRAME_SIZE");
+
+ }
+
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS, NULL);
+
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS");
+
+
+
+ for (i = 0; i < (WORD32)2; i++) {
+ WORD32 ui_size, ui_alignment, ui_type;
+ pVOID pv_alloc_ptr;
+
+ /* Get memory size */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_GET_MEM_INFO_SIZE, i, &ui_size);
+
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_MEM_INFO_SIZE");
+
+ /* Get memory alignment */
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_GET_MEM_INFO_ALIGNMENT, i, &ui_alignment);
+
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_MEM_INFO_ALIGNMENT");
+
+ /* Get memory type */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_GET_MEM_INFO_TYPE, i, &ui_type);
+
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_MEM_INFO_TYPE");
+
+ mMemoryArray[mMallocCount] = memalign(4, ui_size);
+ if (mMemoryArray[mMallocCount] == NULL) {
+ ALOGE("Cannot allocate requested memory %d", ui_size);
+ return IA_FATAL_ERROR;
+ }
+ pv_alloc_ptr = (pVOID)((WORD8 *)mMemoryArray[mMallocCount]);
+ mMallocCount++;
+
+ /* Set the buffer pointer */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_SET_MEM_PTR, i, pv_alloc_ptr);
+
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_SET_MEM_PTR");
+ }
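+ /* Allocate the DRC input and output buffers and register them as
+ memory indices 2 and 3. */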
+ {
+ WORD32 ui_size = 8192 * 2;
+ mMemoryArray[mMallocCount] = memalign(4, ui_size);
+ if (mMemoryArray[mMallocCount] == NULL) {
+ ALOGE("Cannot allocate requested memory %d", ui_size);
+ return IA_FATAL_ERROR;
+ }
+
+ drc_ip_buf = (int8_t *)mMemoryArray[mMallocCount];
+ mMallocCount++;
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR,
+ 2 /* DRC input buffer */, drc_ip_buf);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_SET_MEM_PTR");
+
+ mMemoryArray[mMallocCount] = memalign(4, ui_size);
+ if (mMemoryArray[mMallocCount] == NULL) {
+ ALOGE("Cannot allocate requested memory %d", ui_size);
+ return IA_FATAL_ERROR;
+ }
+
+ drc_op_buf = (int8_t *)mMemoryArray[mMallocCount];
+ mMallocCount++;
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR,
+ 3 /* DRC output buffer */, drc_op_buf);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_SET_MEM_PTR");
+ }
+ /* ITTIAM: DRC buffers
+ buf[0] - contains the loudness-related extension element payload
+ buf[1] - contains the extension element payload */
+ {
+ VOID *p_array[2][16];
+ WORD32 ii;
+ WORD32 buf_sizes[2][16];
+ WORD32 num_elements;
+ WORD32 num_config_ext;
+ WORD32 bit_str_fmt = 1;
+ WORD32 uo_num_chan;
+
+ memset(buf_sizes, 0, 32 * sizeof(WORD32));
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_EXT_ELE_BUF_SIZES, &buf_sizes[0][0]);
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_EXT_ELE_PTR, &p_array);
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_SET_BUFF_PTR, 0);
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_NUM_ELE, &num_elements);
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_NUM_CONFIG_EXT, &num_config_ext);
+
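+ /* Feed the loudness and DRC config extension payloads exported by the
+ xHE-AAC decoder into the MPEG-D DRC module. */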
+ for (ii = 0; ii < num_config_ext; ii++) {
+ /* Copy loudness bitstream */
+ if (buf_sizes[0][ii] > 0) {
+ memcpy(drc_ip_buf, p_array[0][ii], buf_sizes[0][ii]);
+
+ /* Set bitstream_split_format */
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+
+ /* Set number of bytes to be processed */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_SET_INPUT_BYTES_IL_BS, 0,
+ &buf_sizes[0][ii]);
+
+ /* Execute process */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_CPY_IL_BSF_BUFF, NULL);
+
+ drc_flag = 1;
+ }
+ }
+
+ for (ii = 0; ii < num_elements; ii++) {
+ /* Copy config bitstream */
+ if (buf_sizes[1][ii] > 0) {
+ memcpy(drc_ip_buf, p_array[1][ii], buf_sizes[1][ii]);
+
+ /* Set bitstream_split_format */
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+
+ /* Set number of bytes to be processed */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_SET_INPUT_BYTES_IC_BS, 0,
+ &buf_sizes[1][ii]);
+
+ /* Execute process */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_CPY_IC_BSF_BUFF, NULL);
+
+ drc_flag = 1;
+ }
+ }
+
+ if (drc_flag == 1) {
+ mpegd_drc_present = 1;
+ } else {
+ mpegd_drc_present = 0;
+ }
+
+ /* Read interface buffer config file bitstream */
+ if (mpegd_drc_present == 1) {
+ WORD32 interface_is_present = 1;
+
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_INT_PRESENT, &interface_is_present);
+
+ /* Execute process */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_CPY_IN_BSF_BUFF, NULL);
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_PROCESS, NULL);
+
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_NUM_CHANNELS, &uo_num_chan);
+ }
+ }
+ }
+#endif
+
+ return err_code;
+}
int SoftXAAC::decodeXAACStream(uint8_t* inBuffer,
uint32_t inBufferLength,
int32_t *bytesConsumed,
@@ -1143,14 +1574,14 @@
IA_API_CMD_SET_INPUT_BYTES,
0,
&inBufferLength);
- ALOGV("return code of IA_API_CMD_SET_INPUT_BYTES: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_SET_INPUT_BYTES");
/* Execute process */
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_EXECUTE,
IA_CMD_TYPE_DO_EXECUTE,
NULL);
- ALOGV("return code of IA_CMD_TYPE_DO_EXECUTE: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_CMD_TYPE_DO_EXECUTE");
UWORD32 ui_exec_done;
/* Checking for end of processing */
@@ -1158,22 +1589,78 @@
IA_API_CMD_EXECUTE,
IA_CMD_TYPE_DONE_QUERY,
&ui_exec_done);
- ALOGV("return code of IA_CMD_TYPE_DONE_QUERY: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_CMD_TYPE_DONE_QUERY");
+#ifdef ENABLE_MPEG_D_DRC
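+ /* If decoding is not yet complete, fetch this frame's DRC gain payload
+ from the decoder and hand it to the MPEG-D DRC module. */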
+ {
+ if (ui_exec_done != 1) {
+ VOID *p_array; // ITTIAM: buffer to handle gain payload
+ WORD32 buf_size = 0; // ITTIAM: gain payload length
+ WORD32 bit_str_fmt = 1;
+ WORD32 gain_stream_flag = 1;
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
+
+ if (buf_size > 0) {
+ /* Set bitstream_split_format */
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+
+ memcpy(drc_ip_buf, p_array, buf_size);
+ /* Set number of bytes to be processed */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
+
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
+
+ /* Execute process */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_CPY_BSF_BUFF, NULL);
+
+ mpegd_drc_present = 1;
+ }
+ }
+ }
+#endif
/* How much buffer is used in input buffers */
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_GET_CURIDX_INPUT_BUF,
0,
bytesConsumed);
- ALOGV("return code of IA_API_CMD_GET_CURIDX_INPUT_BUF: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
/* Get the output bytes */
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_GET_OUTPUT_BYTES,
0,
outBytes);
- ALOGV("return code of IA_API_CMD_GET_OUTPUT_BYTES: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_GET_OUTPUT_BYTES");
+#ifdef ENABLE_MPEG_D_DRC
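+ /* Run the decoded PCM through the MPEG-D DRC module: copy it into
+ drc_ip_buf, execute DRC, and copy the processed drc_op_buf back. */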
+ if (mpegd_drc_present == 1) {
+ memcpy(drc_ip_buf, mOutputBuffer, *outBytes);
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE,
+ IA_CMD_TYPE_DO_EXECUTE, NULL);
+
+ memcpy(mOutputBuffer, drc_op_buf, *outBytes);
+ }
+#endif
return err_code;
}
@@ -1185,7 +1672,7 @@
IA_API_CMD_INPUT_OVER,
0,
NULL);
- ALOGV("return code of IA_API_CMD_INPUT_OVER: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_API_CMD_INPUT_OVER");
for(int i = 0; i < mMallocCount; i++)
{
@@ -1205,28 +1692,28 @@
IA_API_CMD_GET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_SAMP_FREQ,
&mSampFreq);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_SAMP_FREQ: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SAMP_FREQ");
/* Total Number of Channels */
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_GET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_NUM_CHANNELS,
&mNumChannels);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_NUM_CHANNELS: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_NUM_CHANNELS");
/* PCM word size */
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_GET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_PCM_WDSZ,
&mPcmWdSz);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_PCM_WDSZ: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_PCM_WDSZ");
/* channel mask to tell the arrangement of channels in bit stream */
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_GET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MASK,
&mChannelMask);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MASK: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MASK");
/* Channel mode to tell MONO/STEREO/DUAL-MONO/NONE_OF_THESE */
UWORD32 ui_channel_mode;
@@ -1234,7 +1721,7 @@
IA_API_CMD_GET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MODE,
&ui_channel_mode);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MODE: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MODE");
if(ui_channel_mode == 0)
ALOGV("Channel Mode: MONO_OR_PS\n");
else if(ui_channel_mode == 1)
@@ -1250,7 +1737,7 @@
IA_API_CMD_GET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE,
&ui_sbr_mode);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
if(ui_sbr_mode == 0)
ALOGV("SBR Mode: NOT_PRESENT\n");
else if(ui_sbr_mode == 1)
@@ -1271,51 +1758,116 @@
IA_ERRORCODE SoftXAAC::setXAACDRCInfo(int32_t drcCut,
int32_t drcBoost,
int32_t drcRefLevel,
- int32_t drcHeavyCompression) {
+ int32_t drcHeavyCompression
+#ifdef ENABLE_MPEG_D_DRC
+ , int32_t drEffectType
+#endif
+ ) {
IA_ERRORCODE err_code = IA_NO_ERROR;
int32_t ui_drc_enable = 1;
+#ifdef ENABLE_MPEG_D_DRC
+ int32_t i_effect_type, i_target_loudness, i_loud_norm;
+#endif
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_SET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_ENABLE,
&ui_drc_enable);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_ENABLE: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_ENABLE");
if (drcCut !=-1) {
- ALOGI("set drcCut=%d", drcCut);
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_SET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT,
&drcCut);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT");
}
if (drcBoost !=-1) {
- ALOGI("set drcBoost=%d", drcBoost);
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_SET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST,
&drcBoost);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST");
}
if (drcRefLevel != -1) {
- ALOGI("set drcRefLevel=%d", drcRefLevel);
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_SET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL,
&drcRefLevel);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL");
}
-
+#ifdef ENABLE_MPEG_D_DRC
+ if (drcRefLevel != -1) {
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS,
+ &drcRefLevel);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS");
+ }
+#endif
if (drcHeavyCompression != -1) {
- ALOGI("set drcHeavyCompression=%d", drcHeavyCompression);
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
IA_API_CMD_SET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP,
&drcHeavyCompression);
- ALOGV("return code of IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP: %d",err_code);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP");
}
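+ /* When MPEG-D DRC is enabled, also pass the requested effect type to the
+ xHE-AAC decoder and mirror the DRC settings into the DRC module below. */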
+#ifdef ENABLE_MPEG_D_DRC
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE,
+ &drEffectType);
+
+ /* Set effect type */
+ {
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE, &i_effect_type);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE");
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE, &i_effect_type);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE");
+ }
+
+ /* Set target loudness */
+ {
+ err_code = ixheaacd_dec_api(
+ mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS, &i_target_loudness);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS");
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS, &i_target_loudness);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS");
+ }
+
+ /* Set loud_norm_flag */
+ {
+ err_code = ixheaacd_dec_api(
+ mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM, &i_loud_norm);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM");
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_LOUD_NORM, &i_loud_norm);
+ RETURN_IF_NE(err_code, IA_NO_ERROR, err_code, "IA_DRC_DEC_CONFIG_DRC_LOUD_NORM");
+ }
+#endif
+
return IA_NO_ERROR;
}
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.h b/media/libstagefright/codecs/xaacdec/SoftXAAC.h
index f4b4c54..11a9c77 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.h
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.h
@@ -29,12 +29,21 @@
#include "ixheaacd_apicmd_standards.h"
#include "ixheaacd_memory_standards.h"
#include "ixheaacd_aac_config.h"
-//#include "ixheaacd_aac_dec_error.h"
+
+#include "impd_apicmd_standards.h"
+#include "impd_drc_config_params.h"
#define MAX_MEM_ALLOCS 100
extern "C" IA_ERRORCODE ixheaacd_dec_api(pVOID p_ia_module_obj,
WORD32 i_cmd, WORD32 i_idx, pVOID pv_value);
+extern "C" IA_ERRORCODE ia_drc_dec_api(pVOID p_ia_module_obj,
+ WORD32 i_cmd, WORD32 i_idx, pVOID pv_value);
+extern "C" IA_ERRORCODE ixheaacd_get_config_param(pVOID p_ia_process_api_obj,
+ pWORD32 pi_samp_freq,
+ pWORD32 pi_num_chan,
+ pWORD32 pi_pcm_wd_sz,
+ pWORD32 pi_channel_mask);
namespace android {
@@ -88,6 +97,7 @@
int deInitXAACDecoder();
int configXAACDecoder(uint8_t* inBuffer, uint32_t inBufferLength);
+ int configMPEGDDrc();
int decodeXAACStream(uint8_t* inBuffer,
uint32_t inBufferLength,
int32_t *bytesConsumed,
@@ -98,12 +108,17 @@
IA_ERRORCODE setXAACDRCInfo(int32_t drcCut,
int32_t drcBoost,
int32_t drcRefLevel,
- int32_t drcHeavyCompression);
+ int32_t drcHeavyCompression
+#ifdef ENABLE_MPEG_D_DRC
+ ,int32_t drEffectType
+#endif
+ );
bool mEndOfInput;
bool mEndOfOutput;
void* mXheaacCodecHandle;
+ void* mMpegDDrcHandle;
uint32_t mInputBufferSize;
uint32_t mOutputFrameLength;
int8_t* mInputBuffer;
@@ -114,6 +129,11 @@
int32_t mChannelMask;
bool mIsCodecInitialized;
bool mIsCodecConfigFlushRequired;
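+ // Buffers and state for the MPEG-D DRC post-processing stage
+ // (only exercised when ENABLE_MPEG_D_DRC is defined).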
+ int8_t *drc_ip_buf;
+ int8_t *drc_op_buf;
+ int32_t mpegd_drc_present;
+ int32_t drc_flag;
+// int32_t is_drc_enabled;
void* mMemoryArray[MAX_MEM_ALLOCS];
int32_t mMallocCount;
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index 05f4104..c46a40f 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -85,9 +85,15 @@
|| mDstFormat == OMX_COLOR_Format32bitBGRA8888;
}
+/*
+ * If stride is non-zero, the client's stride is used. For planar or
+ * semi-planar YUV formats the stride must be an even number.
+ * If stride is zero, it is calculated from the width and bpp of the
+ * format, assuming no padding on the right edge.
+ */
ColorConverter::BitmapParams::BitmapParams(
void *bits,
- size_t width, size_t height,
+ size_t width, size_t height, size_t stride,
size_t cropLeft, size_t cropTop,
size_t cropRight, size_t cropBottom,
OMX_COLOR_FORMATTYPE colorFromat)
@@ -101,6 +107,8 @@
mCropBottom(cropBottom) {
switch(mColorFormat) {
case OMX_COLOR_Format16bitRGB565:
+ case OMX_COLOR_FormatYUV420Planar16:
+ case OMX_COLOR_FormatCbYCrY:
mBpp = 2;
mStride = 2 * mWidth;
break;
@@ -112,13 +120,7 @@
mStride = 4 * mWidth;
break;
- case OMX_COLOR_FormatYUV420Planar16:
- mBpp = 2;
- mStride = 2 * mWidth;
- break;
-
case OMX_COLOR_FormatYUV420Planar:
- case OMX_COLOR_FormatCbYCrY:
case OMX_QCOM_COLOR_FormatYVU420SemiPlanar:
case OMX_COLOR_FormatYUV420SemiPlanar:
case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
@@ -132,6 +134,10 @@
mStride = mWidth;
break;
}
+ // use client's stride if it's specified.
+ if (stride != 0) {
+ mStride = stride;
+ }
}
size_t ColorConverter::BitmapParams::cropWidth() const {
@@ -144,21 +150,21 @@
status_t ColorConverter::convert(
const void *srcBits,
- size_t srcWidth, size_t srcHeight,
+ size_t srcWidth, size_t srcHeight, size_t srcStride,
size_t srcCropLeft, size_t srcCropTop,
size_t srcCropRight, size_t srcCropBottom,
void *dstBits,
- size_t dstWidth, size_t dstHeight,
+ size_t dstWidth, size_t dstHeight, size_t dstStride,
size_t dstCropLeft, size_t dstCropTop,
size_t dstCropRight, size_t dstCropBottom) {
BitmapParams src(
const_cast<void *>(srcBits),
- srcWidth, srcHeight,
+ srcWidth, srcHeight, srcStride,
srcCropLeft, srcCropTop, srcCropRight, srcCropBottom, mSrcFormat);
BitmapParams dst(
dstBits,
- dstWidth, dstHeight,
+ dstWidth, dstHeight, dstStride,
dstCropLeft, dstCropTop, dstCropRight, dstCropBottom, mDstFormat);
if (!((src.mCropLeft & 1) == 0
@@ -792,15 +798,15 @@
uint8_t *kAdjustedClip = initClip();
- uint16_t *dst_ptr = (uint16_t *)dst.mBits
- + dst.mCropTop * dst.mWidth + dst.mCropLeft;
+ uint16_t *dst_ptr = (uint16_t *)((uint8_t *)
+ dst.mBits + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp);
const uint8_t *src_y =
- (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
+ (const uint8_t *)src.mBits + src.mCropTop * src.mStride + src.mCropLeft;
const uint8_t *src_u =
- (const uint8_t *)src_y + src.mWidth * src.mHeight
- + src.mCropTop * src.mWidth + src.mCropLeft;
+ (const uint8_t *)src.mBits + src.mHeight * src.mStride +
+ src.mCropTop * src.mStride / 2 + src.mCropLeft;
for (size_t y = 0; y < src.cropHeight(); ++y) {
for (size_t x = 0; x < src.cropWidth(); x += 2) {
@@ -842,13 +848,13 @@
}
}
- src_y += src.mWidth;
+ src_y += src.mStride;
if (y & 1) {
- src_u += src.mWidth;
+ src_u += src.mStride;
}
- dst_ptr += dst.mWidth;
+ dst_ptr = (uint16_t*)((uint8_t*)dst_ptr + dst.mStride);
}
return OK;
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 657a05b..359df3d 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -44,6 +44,7 @@
mNativeWindow(nativeWindow),
mWidth(0),
mHeight(0),
+ mStride(0),
mCropLeft(0),
mCropTop(0),
mCropRight(0),
@@ -67,9 +68,10 @@
int32_t colorFormatNew;
CHECK(format->findInt32("color-format", &colorFormatNew));
- int32_t widthNew, heightNew;
- CHECK(format->findInt32("stride", &widthNew));
+ int32_t widthNew, heightNew, strideNew;
+ CHECK(format->findInt32("width", &widthNew));
CHECK(format->findInt32("slice-height", &heightNew));
+ CHECK(format->findInt32("stride", &strideNew));
int32_t cropLeftNew, cropTopNew, cropRightNew, cropBottomNew;
if (!format->findRect(
@@ -106,6 +108,7 @@
mColorFormat = static_cast<OMX_COLOR_FORMATTYPE>(colorFormatNew);
mWidth = widthNew;
mHeight = heightNew;
+ mStride = strideNew;
mCropLeft = cropLeftNew;
mCropTop = cropTopNew;
mCropRight = cropRightNew;
@@ -276,20 +279,15 @@
if (mConverter) {
mConverter->convert(
data,
- mWidth, mHeight,
+ mWidth, mHeight, mStride,
mCropLeft, mCropTop, mCropRight, mCropBottom,
dst,
- buf->stride, buf->height,
+ buf->stride, buf->height, 0,
0, 0, mCropWidth - 1, mCropHeight - 1);
} else if (mColorFormat == OMX_COLOR_FormatYUV420Planar) {
- const uint8_t *src_y = (const uint8_t *)data;
- const uint8_t *src_u =
- (const uint8_t *)data + mWidth * mHeight;
- const uint8_t *src_v = src_u + (mWidth / 2 * mHeight / 2);
-
- src_y +=mCropLeft + mCropTop * mWidth;
- src_u +=(mCropLeft + mCropTop * mWidth / 2)/2;
- src_v +=(mCropLeft + mCropTop * mWidth / 2)/2;
+ const uint8_t *src_y = (const uint8_t *)data + mCropTop * mStride + mCropLeft;
+ const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
+ const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
uint8_t *dst_y = (uint8_t *)dst;
size_t dst_y_size = buf->stride * buf->height;
@@ -305,7 +303,7 @@
for (int y = 0; y < mCropHeight; ++y) {
memcpy(dst_y, src_y, mCropWidth);
- src_y += mWidth;
+ src_y += mStride;
dst_y += buf->stride;
}
@@ -313,19 +311,15 @@
memcpy(dst_u, src_u, (mCropWidth + 1) / 2);
memcpy(dst_v, src_v, (mCropWidth + 1) / 2);
- src_u += mWidth / 2;
- src_v += mWidth / 2;
+ src_u += mStride / 2;
+ src_v += mStride / 2;
dst_u += dst_c_stride;
dst_v += dst_c_stride;
}
} else if (mColorFormat == OMX_COLOR_FormatYUV420Planar16) {
- const uint16_t *src_y = (const uint16_t *)data;
- const uint16_t *src_u = (const uint16_t *)data + mWidth * mHeight;
- const uint16_t *src_v = src_u + (mWidth / 2 * mHeight / 2);
-
- src_y += mCropLeft + mCropTop * mWidth;
- src_u += (mCropLeft + mCropTop * mWidth / 2) / 2;
- src_v += (mCropLeft + mCropTop * mWidth / 2) / 2;
+ const uint8_t *src_y = (const uint8_t *)data + mCropTop * mStride + mCropLeft * 2;
+ const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
+ const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
uint8_t *dst_y = (uint8_t *)dst;
size_t dst_y_size = buf->stride * buf->height;
@@ -340,21 +334,21 @@
for (int y = 0; y < mCropHeight; ++y) {
for (int x = 0; x < mCropWidth; ++x) {
- dst_y[x] = (uint8_t)(src_y[x] >> 2);
+ dst_y[x] = (uint8_t)(((uint16_t *)src_y)[x] >> 2);
}
- src_y += mWidth;
+ src_y += mStride;
dst_y += buf->stride;
}
for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
for (int x = 0; x < (mCropWidth + 1) / 2; ++x) {
- dst_u[x] = (uint8_t)(src_u[x] >> 2);
- dst_v[x] = (uint8_t)(src_v[x] >> 2);
+ dst_u[x] = (uint8_t)(((uint16_t *)src_u)[x] >> 2);
+ dst_v[x] = (uint8_t)(((uint16_t *)src_v)[x] >> 2);
}
- src_u += mWidth / 2;
- src_v += mWidth / 2;
+ src_u += mStride / 2;
+ src_v += mStride / 2;
dst_u += dst_c_stride;
dst_v += dst_c_stride;
}
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index f292c47..9f39b5e 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -1029,7 +1029,8 @@
sp<AMessage> itemMeta;
int64_t itemDurationUs;
int32_t targetDuration;
- if (mPlaylist->meta()->findInt32("target-duration", &targetDuration)) {
+ if (mPlaylist->meta() != NULL
+ && mPlaylist->meta()->findInt32("target-duration", &targetDuration)) {
do {
--index;
if (!mPlaylist->itemAt(index, NULL /* uri */, &itemMeta)
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index c286516..64dca4e 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -51,7 +51,7 @@
ColorConverter *mConverter;
YUVMode mYUVMode;
sp<ANativeWindow> mNativeWindow;
- int32_t mWidth, mHeight;
+ int32_t mWidth, mHeight, mStride;
int32_t mCropLeft, mCropTop, mCropRight, mCropBottom;
int32_t mCropWidth, mCropHeight;
int32_t mRotationDegrees;
diff --git a/media/libstagefright/include/media/stagefright/ColorConverter.h b/media/libstagefright/include/media/stagefright/ColorConverter.h
index 5b3543d..2b8c7c8 100644
--- a/media/libstagefright/include/media/stagefright/ColorConverter.h
+++ b/media/libstagefright/include/media/stagefright/ColorConverter.h
@@ -37,11 +37,11 @@
status_t convert(
const void *srcBits,
- size_t srcWidth, size_t srcHeight,
+ size_t srcWidth, size_t srcHeight, size_t srcStride,
size_t srcCropLeft, size_t srcCropTop,
size_t srcCropRight, size_t srcCropBottom,
void *dstBits,
- size_t dstWidth, size_t dstHeight,
+ size_t dstWidth, size_t dstHeight, size_t dstStride,
size_t dstCropLeft, size_t dstCropTop,
size_t dstCropRight, size_t dstCropBottom);
@@ -49,7 +49,7 @@
struct BitmapParams {
BitmapParams(
void *bits,
- size_t width, size_t height,
+ size_t width, size_t height, size_t stride,
size_t cropLeft, size_t cropTop,
size_t cropRight, size_t cropBottom,
OMX_COLOR_FORMATTYPE colorFromat);
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index cd0f75c..361b873 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -154,12 +154,12 @@
outDef->format.video.nFrameWidth = outputBufferWidth();
outDef->format.video.nFrameHeight = outputBufferHeight();
outDef->format.video.eColorFormat = mOutputFormat;
- outDef->format.video.nStride = outDef->format.video.nFrameWidth;
outDef->format.video.nSliceHeight = outDef->format.video.nFrameHeight;
int32_t bpp = (mOutputFormat == OMX_COLOR_FormatYUV420Planar16) ? 2 : 1;
+ outDef->format.video.nStride = outDef->format.video.nFrameWidth * bpp;
outDef->nBufferSize =
- (outDef->format.video.nStride * outDef->format.video.nSliceHeight * bpp * 3) / 2;
+ (outDef->format.video.nStride * outDef->format.video.nSliceHeight * 3) / 2;
OMX_PARAM_PORTDEFINITIONTYPE *inDef = &editPortInfo(kInputPortIndex)->mDef;
inDef->format.video.nFrameWidth = mWidth;
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 7a02963..caeede9 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -489,6 +489,10 @@
timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ // We don't compensate for server - kernel time difference and
+ // only update latency if we have valid info.
+ dumpState->mLatencyMs =
+ (double)mNativeFramesWrittenButNotPresented * 1000 / mSampleRate;
} else {
// HAL reported that more frames were presented than were written
mNativeFramesWrittenButNotPresented = 0;
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index 2e4fb8c..b3a2520 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -68,11 +68,11 @@
dprintf(fd, " FastMixer command=%s writeSequence=%u framesWritten=%u\n"
" numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
" sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
- " mixPeriod=%.2f ms\n",
+ " mixPeriod=%.2f ms latency=%.2f ms\n",
FastMixerState::commandToString(mCommand), mWriteSequence, mFramesWritten,
mNumTracks, mWriteErrors, mUnderruns, mOverruns,
mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
- mixPeriodSec * 1e3);
+ mixPeriodSec * 1e3, mLatencyMs);
#ifdef FAST_THREAD_STATISTICS
// find the interval of valid samples
uint32_t bounds = mBounds;
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
index 8ef31d1..aed6bc5 100644
--- a/services/audioflinger/FastMixerDumpState.h
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -66,6 +66,7 @@
void dump(int fd) const; // should only be called on a stable copy, not the original
+ double mLatencyMs = 0.; // measured latency, default of 0 if no valid timestamp read.
uint32_t mWriteSequence; // incremented before and after each write()
uint32_t mFramesWritten; // total number of frames written successfully
uint32_t mNumTracks; // total number of active fast tracks
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index a08da96..3d688fb 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -227,13 +227,16 @@
audio_devices_t device = patch->sinks[0].ext.device.type;
String8 address = String8(patch->sinks[0].ext.device.address);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
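+ // Use the output flags requested in the sink port config, if present.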
+ audio_output_flags_t flags =
+ patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+ patch->sinks[0].flags.output : AUDIO_OUTPUT_FLAG_NONE;
sp<ThreadBase> thread = mAudioFlinger.openOutput_l(
patch->sinks[0].ext.device.hw_module,
&output,
&config,
device,
address,
- AUDIO_OUTPUT_FLAG_NONE);
+ flags);
ALOGV("mAudioFlinger.openOutput_l() returned %p", thread.get());
if (thread == 0) {
status = NO_MEMORY;
@@ -262,6 +265,9 @@
} else {
config.format = newPatch.mPlayback.thread()->format();
}
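+ // Use the input flags requested in the source port config, if present.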
+ audio_input_flags_t flags =
+ patch->sources[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+ patch->sources[0].flags.input : AUDIO_INPUT_FLAG_NONE;
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
sp<ThreadBase> thread = mAudioFlinger.openInput_l(srcModule,
&input,
@@ -269,7 +275,7 @@
device,
address,
AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE);
+ flags);
ALOGV("mAudioFlinger.openInput_l() returned %p inChannelMask %08x",
thread.get(), config.channel_mask);
if (thread == 0) {
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index ff9444d..9a8a154 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -41,7 +41,7 @@
virtual ~Track();
virtual status_t initCheck() const;
- static void appendDumpHeader(String8& result);
+ void appendDumpHeader(String8& result);
void appendDump(String8& result, bool active);
virtual status_t start(AudioSystem::sync_event_t event =
AudioSystem::SYNC_EVENT_NONE,
@@ -75,6 +75,7 @@
bool isOffloadedOrDirect() const { return (mFlags
& (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
| AUDIO_OUTPUT_FLAG_DIRECT)) != 0; }
+ bool isStatic() const { return mSharedBuffer.get() != nullptr; }
status_t setParameters(const String8& keyValuePairs);
status_t attachAuxEffect(int EffectId);
@@ -93,6 +94,11 @@
virtual bool isFastTrack() const { return (mFlags & AUDIO_OUTPUT_FLAG_FAST) != 0; }
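+ // Latency contributed by audio still buffered in the track's ring buffer;
+ // zero for static tracks, which play from a shared buffer.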
+ virtual double bufferLatencyMs() {
+ return isStatic() ? 0.
+ : (double)mAudioTrackServerProxy->framesReadySafe() * 1000 / sampleRate();
+ }
+
// implement volume handling.
media::VolumeShaper::Status applyVolumeShaper(
const sp<media::VolumeShaper::Configuration>& configuration,
@@ -194,6 +200,7 @@
sp<media::VolumeHandler> mVolumeHandler; // handles multiple VolumeShaper configs and operations
+ bool mDumpLatency = false; // true if track supports latency dumps.
private:
// The following fields are only for fast tracks, and should be in a subclass
int mFastIndex; // index within FastMixerState::mFastTracks[];
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 90e2e8e..ee4283e 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1784,7 +1784,7 @@
if (numtracks) {
dprintf(fd, " of which %zu are active\n", numactive);
result.append(prefix);
- Track::appendDumpHeader(result);
+ mTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numtracks; ++i) {
sp<Track> track = mTracks[i];
if (track != 0) {
@@ -1804,7 +1804,7 @@
result.append(" The following tracks are in the active list but"
" not in the track list\n");
result.append(prefix);
- Track::appendDumpHeader(result);
+ mActiveTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numactive; ++i) {
sp<Track> track = mActiveTracks[i];
if (mTracks.indexOf(track) < 0) {
@@ -3792,6 +3792,10 @@
config->role = AUDIO_PORT_ROLE_SOURCE;
config->ext.mix.hw_module = mOutput->audioHwDev->handle();
config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
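+ // Report non-default output flags so audio patches to this mix port
+ // can be created with matching flags.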
+ if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.output = mOutput->flags;
+ }
}
// ----------------------------------------------------------------------------
@@ -5054,6 +5058,10 @@
dprintf(fd, " Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
dprintf(fd, " AudioMixer tracks: %s\n", mAudioMixer->trackNames().c_str());
dprintf(fd, " Master mono: %s\n", mMasterMono ? "on" : "off");
+ const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
+ if (latencyMs > 0.) {
+ dprintf(fd, " NormalMixer latency ms: %.2lf\n", latencyMs);
+ }
if (hasFastMixer()) {
dprintf(fd, " FastMixer thread %p tid=%d", mFastMixer.get(), mFastMixer->getTid());
@@ -7894,6 +7902,10 @@
config->role = AUDIO_PORT_ROLE_SINK;
config->ext.mix.hw_module = mInput->audioHwDev->handle();
config->ext.mix.usecase.source = mAudioSource;
+ if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.input = mInput->flags;
+ }
}
// ----------------------------------------------------------------------------
@@ -8861,6 +8873,15 @@
}
}
+void AudioFlinger::MmapPlaybackThread::toAudioPortConfig(struct audio_port_config *config)
+{
+ MmapThread::toAudioPortConfig(config);
+ if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.output = mOutput->flags;
+ }
+}
+
void AudioFlinger::MmapPlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
{
MmapThread::dumpInternals(fd, args);
@@ -8951,4 +8972,13 @@
}
}
+void AudioFlinger::MmapCaptureThread::toAudioPortConfig(struct audio_port_config *config)
+{
+ MmapThread::toAudioPortConfig(config);
+ if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.input = mInput->flags;
+ }
+}
+
} // namespace android
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index f18294b..680e021 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1679,6 +1679,8 @@
void updateMetadata_l() override;
+ virtual void toAudioPortConfig(struct audio_port_config *config);
+
protected:
audio_stream_type_t mStreamType;
@@ -1707,6 +1709,8 @@
void processVolume_l() override;
void setRecordSilenced(uid_t uid, bool silenced) override;
+ virtual void toAudioPortConfig(struct audio_port_config *config);
+
protected:
AudioStreamIn* mInput;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 824dac7..6953ebf 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -435,6 +435,7 @@
}
mName = TRACK_NAME_PENDING;
+ mDumpLatency = thread->type() == ThreadBase::MIXER;
#ifdef TEE_SINK
mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
+ "_" + std::to_string(mId) +
@@ -489,13 +490,14 @@
}
}
-/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
+void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
{
- result.append("T Name Active Client Session S Flags "
+ result.appendFormat("T Name Active Client Session S Flags "
" Format Chn mask SRate "
"ST L dB R dB VS dB "
- " Server FrmCnt FrmRdy F Underruns Flushed "
- "Main Buf Aux Buf\n");
+ " Server FrmCnt FrmRdy F Underruns Flushed"
+ "%s\n",
+ mDumpLatency ? " Latency" : "");
}
void AudioFlinger::PlaybackThread::Track::appendDump(String8& result, bool active)
@@ -504,7 +506,7 @@
switch (mType) {
case TYPE_DEFAULT:
case TYPE_OUTPUT:
- if (mSharedBuffer.get() != nullptr) {
+ if (isStatic()) {
trackType = 'S'; // static
} else {
trackType = ' '; // normal
@@ -582,8 +584,7 @@
result.appendFormat("%7s %6u %7u %2s 0x%03X "
"%08X %08X %6u "
"%2u %5.2g %5.2g %5.2g%c "
- "%08X %6zu%c %6zu %c %9u%c %7u "
- "%08zX %08zX\n",
+ "%08X %6zu%c %6zu %c %9u%c %7u",
active ? "yes" : "no",
(mClient == 0) ? getpid() : mClient->pid(),
mSessionId,
@@ -607,11 +608,19 @@
fillingStatus,
mAudioTrackServerProxy->getUnderrunFrames(),
nowInUnderrun,
- (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000,
-
- (size_t)mMainBuffer, // use %zX as %p appends 0x
- (size_t)mAuxBuffer // use %zX as %p appends 0x
+ (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000
);
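+ // Total latency: pipeline latency from the track's last server timestamp
+ // plus audio still buffered in the track.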
+ if (mDumpLatency) {
+ double latencyMs =
+ mAudioTrackServerProxy->getTimestamp().getOutputServerLatencyMs(mSampleRate);
+ if (latencyMs > 0.) {
+ latencyMs += bufferLatencyMs();
+ result.appendFormat(" %7.3f", latencyMs);
+ } else {
+ result.appendFormat(" Unknown");
+ }
+ }
+ result.append("\n");
}
uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index 09a86dd..bd7517f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -153,9 +153,6 @@
class AudioPortConfig : public virtual RefBase
{
public:
- AudioPortConfig();
- virtual ~AudioPortConfig() {}
-
status_t applyAudioPortConfig(const struct audio_port_config *config,
struct audio_port_config *backupConfig = NULL);
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -165,10 +162,11 @@
return (other != 0) &&
(other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
}
- uint32_t mSamplingRate;
- audio_format_t mFormat;
- audio_channel_mask_t mChannelMask;
- struct audio_gain_config mGain;
+ unsigned int mSamplingRate = 0u;
+ audio_format_t mFormat = AUDIO_FORMAT_INVALID;
+ audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
+ struct audio_gain_config mGain = { .index = -1 };
+ union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
};
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index fc868d3..3fe37ab 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -386,15 +386,6 @@
// --- AudioPortConfig class implementation
-AudioPortConfig::AudioPortConfig()
-{
- mSamplingRate = 0;
- mChannelMask = AUDIO_CHANNEL_NONE;
- mFormat = AUDIO_FORMAT_INVALID;
- memset(&mGain, 0, sizeof(struct audio_gain_config));
- mGain.index = -1;
-}
-
status_t AudioPortConfig::applyAudioPortConfig(const struct audio_port_config *config,
struct audio_port_config *backupConfig)
{
@@ -424,6 +415,9 @@
if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
mGain = config->gain;
}
+ if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+ mFlags = config->flags;
+ }
exit:
if (status != NO_ERROR) {
@@ -435,33 +429,38 @@
return status;
}
+namespace {
+
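+// Helper: copy a single field into dstConfig. If dstConfig requests the
+// field (configMask set), prefer srcConfig's value when srcConfig also sets
+// it, otherwise use this port config's stored value; if dstConfig does not
+// request the field, reset it to defaultValue.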
+template<typename T>
+void updateField(
+ const T& portConfigField, T audio_port_config::*port_config_field,
+ struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig,
+ unsigned int configMask, T defaultValue)
+{
+ if (dstConfig->config_mask & configMask) {
+ if ((srcConfig != nullptr) && (srcConfig->config_mask & configMask)) {
+ dstConfig->*port_config_field = srcConfig->*port_config_field;
+ } else {
+ dstConfig->*port_config_field = portConfigField;
+ }
+ } else {
+ dstConfig->*port_config_field = defaultValue;
+ }
+}
+
+} // namespace
+
void AudioPortConfig::toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig) const
{
- if (dstConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
- dstConfig->sample_rate = mSamplingRate;
- if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE)) {
- dstConfig->sample_rate = srcConfig->sample_rate;
- }
- } else {
- dstConfig->sample_rate = 0;
- }
- if (dstConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
- dstConfig->channel_mask = mChannelMask;
- if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK)) {
- dstConfig->channel_mask = srcConfig->channel_mask;
- }
- } else {
- dstConfig->channel_mask = AUDIO_CHANNEL_NONE;
- }
- if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
- dstConfig->format = mFormat;
- if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT)) {
- dstConfig->format = srcConfig->format;
- }
- } else {
- dstConfig->format = AUDIO_FORMAT_INVALID;
- }
+ updateField(mSamplingRate, &audio_port_config::sample_rate,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_SAMPLE_RATE, 0u);
+ updateField(mChannelMask, &audio_port_config::channel_mask,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_CHANNEL_MASK,
+ (audio_channel_mask_t)AUDIO_CHANNEL_NONE);
+ updateField(mFormat, &audio_port_config::format,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_FORMAT, AUDIO_FORMAT_INVALID);
+
sp<AudioPort> audioport = getAudioPort();
if ((dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) && audioport != NULL) {
dstConfig->gain = mGain;
@@ -477,6 +476,9 @@
} else {
dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
}
+
+ updateField(mFlags, &audio_port_config::flags,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_FLAGS, { AUDIO_INPUT_FLAG_NONE });
}
} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 1d8f10d..d3a1188 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -528,10 +528,13 @@
item->setCString(kAudioPolicyRqstSrc,
audioSourceString(client->attributes.source).c_str());
- item->setCString(kAudioPolicyRqstPkg,
- std::string(String8(client->opPackageName).string()).c_str());
item->setInt32(kAudioPolicyRqstSession, client->session);
-
+ if (client->opPackageName.size() != 0) {
+ item->setCString(kAudioPolicyRqstPkg,
+ std::string(String8(client->opPackageName).string()).c_str());
+ } else {
+ item->setCString(kAudioPolicyRqstPkg, to_string(client->uid).c_str());
+ }
item->setCString(
kAudioPolicyRqstDevice, getDeviceTypeStrForPortId(client->deviceId).c_str());
@@ -550,9 +553,13 @@
// keeps the last of the clients marked active
item->setCString(kAudioPolicyActiveSrc,
audioSourceString(other->attributes.source).c_str());
- item->setCString(kAudioPolicyActivePkg,
- std::string(String8(other->opPackageName).string()).c_str());
item->setInt32(kAudioPolicyActiveSession, other->session);
+ if (other->opPackageName.size() != 0) {
+ item->setCString(kAudioPolicyActivePkg,
+ std::string(String8(other->opPackageName).string()).c_str());
+ } else {
+ item->setCString(kAudioPolicyActivePkg, to_string(other->uid).c_str());
+ }
item->setCString(kAudioPolicyActiveDevice,
getDeviceTypeStrForPortId(other->deviceId).c_str());
}
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index c87b5eb..282871b 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -2337,10 +2337,13 @@
void CameraService::Client::notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras) {
- (void) errorCode;
(void) resultExtras;
if (mRemoteCallback != NULL) {
- mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
+ int32_t api1ErrorCode = CAMERA_ERROR_RELEASED;
+ if (errorCode == hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISABLED) {
+ api1ErrorCode = CAMERA_ERROR_DISABLED;
+ }
+ mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, api1ErrorCode, 0);
} else {
ALOGE("mRemoteCallback is NULL!!");
}
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 37e1495..8dc9863 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -802,29 +802,25 @@
__FUNCTION__);
continue;
}
- uint8_t afMode = entry.data.u8[0];
- if (afMode == ANDROID_CONTROL_AF_MODE_OFF) {
- // Skip all the ZSL buffer for manual AF mode, as we don't really
- // know the af state.
- continue;
- }
-
// Check AF state if device has focuser and focus mode isn't fixed
- if (mHasFocuser && !isFixedFocusMode(afMode)) {
- // Make sure the candidate frame has good focus.
- entry = frame.find(ANDROID_CONTROL_AF_STATE);
- if (entry.count == 0) {
- ALOGW("%s: ZSL queue frame has no AF state field!",
- __FUNCTION__);
- continue;
- }
- uint8_t afState = entry.data.u8[0];
- if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
- afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
- afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
- ALOGVV("%s: ZSL queue frame AF state is %d is not good for capture, skip it",
- __FUNCTION__, afState);
- continue;
+ if (mHasFocuser) {
+ uint8_t afMode = entry.data.u8[0];
+ if (!isFixedFocusMode(afMode)) {
+ // Make sure the candidate frame has good focus.
+ entry = frame.find(ANDROID_CONTROL_AF_STATE);
+ if (entry.count == 0) {
+ ALOGW("%s: ZSL queue frame has no AF state field!",
+ __FUNCTION__);
+ continue;
+ }
+ uint8_t afState = entry.data.u8[0];
+ if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
+ afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
+ afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
+ ALOGVV("%s: ZSL queue frame AF state is %d is not good for capture,"
+ " skip it", __FUNCTION__, afState);
+ continue;
+ }
}
}
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 089d62b..db5f0ff 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -22,7 +22,6 @@
libstagefright_soft_vorbisdec \
libstagefright_soft_vpxdec \
libstagefright_soft_vpxenc \
- libstagefright_soft_xaacdec \
# service executable
include $(CLEAR_VARS)