Merge "Use audio tag for system tracing" into jb-dev
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index d96007b..e46e8e9 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -210,6 +210,9 @@
void extractCSD(const sp<AMessage> &format);
status_t queueCSDInputBuffer(size_t bufferIndex);
+ status_t setNativeWindow(
+ const sp<SurfaceTextureClient> &surfaceTextureClient);
+
DISALLOW_EVIL_CONSTRUCTORS(MediaCodec);
};
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index e032cfc..5b513a8 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -796,9 +796,6 @@
break;
}
- mReplyID = replyID;
- setState(CONFIGURING);
-
sp<RefBase> obj;
if (!msg->findObject("native-window", &obj)) {
obj.clear();
@@ -810,15 +807,24 @@
if (obj != NULL) {
format->setObject("native-window", obj);
- if (mFlags & kFlagIsSoftwareCodec) {
- mNativeWindow =
- static_cast<NativeWindowWrapper *>(obj.get())
- ->getSurfaceTextureClient();
+ status_t err = setNativeWindow(
+ static_cast<NativeWindowWrapper *>(obj.get())
+ ->getSurfaceTextureClient());
+
+ if (err != OK) {
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+
+ response->postReply(replyID);
+ break;
}
} else {
- mNativeWindow.clear();
+ setNativeWindow(NULL);
}
+ mReplyID = replyID;
+ setState(CONFIGURING);
+
void *crypto;
if (!msg->findPointer("crypto", &crypto)) {
crypto = NULL;
@@ -1180,12 +1186,12 @@
}
void MediaCodec::setState(State newState) {
- if (newState == INITIALIZED) {
+ if (newState == INITIALIZED || newState == UNINITIALIZED) {
delete mSoftRenderer;
mSoftRenderer = NULL;
mCrypto.clear();
- mNativeWindow.clear();
+ setNativeWindow(NULL);
mOutputFormat.clear();
mFlags &= ~kFlagOutputFormatChanged;
@@ -1425,4 +1431,37 @@
return index;
}
+status_t MediaCodec::setNativeWindow(
+ const sp<SurfaceTextureClient> &surfaceTextureClient) {
+ status_t err;
+
+ if (mNativeWindow != NULL) {
+ err = native_window_api_disconnect(
+ mNativeWindow.get(), NATIVE_WINDOW_API_MEDIA);
+
+ if (err != OK) {
+ ALOGW("native_window_api_disconnect returned an error: %s (%d)",
+ strerror(-err), err);
+ }
+
+ mNativeWindow.clear();
+ }
+
+ if (surfaceTextureClient != NULL) {
+ err = native_window_api_connect(
+ surfaceTextureClient.get(), NATIVE_WINDOW_API_MEDIA);
+
+ if (err != OK) {
+ ALOGE("native_window_api_connect returned an error: %s (%d)",
+ strerror(-err), err);
+
+ return err;
+ }
+
+ mNativeWindow = surfaceTextureClient;
+ }
+
+ return OK;
+}
+
} // namespace android
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index bf7befd..e499a0b 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -267,6 +267,15 @@
inBuffer[0] = header->pBuffer + header->nOffset;
inBufferLength[0] = header->nFilledLen;
+ // Make the decoder more robust by pruning explicit backward compatible
+ // extension for LC, HE-AACv1 (SBR), HE-AACv2 (SBR + PS). We'll depend
+ // on implicit configuration.
+ if (inBufferLength[0] > 2) {
+ UCHAR aot = inBuffer[0][0] >> 3;
if (aot == 2 || aot == 5 || aot == 29) {
+ inBufferLength[0] = 2;
+ }
+ }
AAC_DECODER_ERROR decoderErr =
aacDecoder_ConfigRaw(mAACDecoder,
inBuffer,
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
index 796caa4..c5f733b 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
@@ -258,10 +258,14 @@
}
static size_t getFrameSize(unsigned FT) {
- static const size_t kFrameSizeWB[9] = {
- 132, 177, 253, 285, 317, 365, 397, 461, 477
+ static const size_t kFrameSizeWB[10] = {
+ 132, 177, 253, 285, 317, 365, 397, 461, 477, 40
};
+ if (FT >= 10) {
+ return 1;
+ }
+
size_t frameSize = kFrameSizeWB[FT];
// Round up bits to bytes and add 1 for the header byte.
@@ -336,30 +340,47 @@
}
} else {
int16 mode = ((inputPtr[0] >> 3) & 0x0f);
+
+ if (mode >= 10 && mode <= 13) {
+ ALOGE("encountered illegal frame type %d in AMR WB content.",
+ mode);
+
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ mSignalledError = true;
+
+ return;
+ }
+
size_t frameSize = getFrameSize(mode);
CHECK_GE(inHeader->nFilledLen, frameSize);
- int16 frameType;
- RX_State_wb rx_state;
- mime_unsorting(
- const_cast<uint8_t *>(&inputPtr[1]),
- mInputSampleBuffer,
- &frameType, &mode, 1, &rx_state);
-
int16_t *outPtr = (int16_t *)outHeader->pBuffer;
- int16_t numSamplesOutput;
- pvDecoder_AmrWb(
- mode, mInputSampleBuffer,
- outPtr,
- &numSamplesOutput,
- mDecoderBuf, frameType, mDecoderCookie);
+ if (mode >= 9) {
+ // Produce silence instead of comfort noise and for
+ // speech lost/no data.
+ memset(outPtr, 0, kNumSamplesPerFrameWB * sizeof(int16_t));
+ } else if (mode < 9) {
+ int16 frameType;
+ RX_State_wb rx_state;
+ mime_unsorting(
+ const_cast<uint8_t *>(&inputPtr[1]),
+ mInputSampleBuffer,
+ &frameType, &mode, 1, &rx_state);
- CHECK_EQ((int)numSamplesOutput, (int)kNumSamplesPerFrameWB);
+ int16_t numSamplesOutput;
+ pvDecoder_AmrWb(
+ mode, mInputSampleBuffer,
+ outPtr,
+ &numSamplesOutput,
+ mDecoderBuf, frameType, mDecoderCookie);
- for (int i = 0; i < kNumSamplesPerFrameWB; ++i) {
- /* Delete the 2 LSBs (14-bit output) */
- outPtr[i] &= 0xfffC;
+ CHECK_EQ((int)numSamplesOutput, (int)kNumSamplesPerFrameWB);
+
+ for (int i = 0; i < kNumSamplesPerFrameWB; ++i) {
+ /* Delete the 2 LSBs (14-bit output) */
+ outPtr[i] &= 0xfffC;
+ }
}
numBytesRead = frameSize;