Merge "Channel counts are uint32_t"
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index f447c5b..7765914 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -133,11 +133,19 @@
 }
 
 status_t CameraMetadata::append(const CameraMetadata &other) {
+    return append(other.mBuffer);
+}
+
+status_t CameraMetadata::append(const camera_metadata_t* other) {
     if (mLocked) {
         ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
         return INVALID_OPERATION;
     }
-    return append_camera_metadata(mBuffer, other.mBuffer);
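+    // grow the buffer first so append_camera_metadata has room for the new entries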
+    size_t extraEntries = get_camera_metadata_entry_count(other);
+    size_t extraData = get_camera_metadata_data_count(other);
+    resizeIfNeeded(extraEntries, extraData);
+
+    return append_camera_metadata(mBuffer, other);
 }
 
 size_t CameraMetadata::entryCount() const {
diff --git a/include/camera/CameraMetadata.h b/include/camera/CameraMetadata.h
index fe2bd19..1254d3c 100644
--- a/include/camera/CameraMetadata.h
+++ b/include/camera/CameraMetadata.h
@@ -99,6 +99,11 @@
     status_t append(const CameraMetadata &other);
 
     /**
+     * Append metadata from a raw camera_metadata buffer
+     */
+    status_t append(const camera_metadata* other);
+
+    /**
      * Number of metadata entries.
      */
     size_t entryCount() const;
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index a8ffd4a..15c99a5 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -247,6 +247,8 @@
             int32_t numChannels, int32_t sampleRate, int32_t bitRate,
             int32_t aacProfile, bool isADTS);
 
+    status_t setupAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate);
+
     status_t selectAudioPortFormat(
             OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat);
 
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 85693d4..cf5beda 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -44,6 +44,7 @@
 extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
 extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
 extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
 
 extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
 extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index daaf20f..5121c17 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -248,6 +248,8 @@
             int32_t numChannels, int32_t sampleRate, int32_t bitRate,
             int32_t aacProfile, bool isADTS);
 
+    status_t setAC3Format(int32_t numChannels, int32_t sampleRate);
+
     void setG711Format(int32_t numChannels);
 
     status_t setVideoPortFormatType(
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index 22e9fad..b420c95 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -600,16 +600,15 @@
         // wrong audio buffer size  (mAudioBufferSize)
         unsigned long toggle = mToggle ^ 1;
         void *userData = (void *)((unsigned long)this | toggle);
-        uint32_t channels = (numChannels == 2) ?
-                AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO;
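+        // derive the channel mask from the channel count instead of assuming mono or stereo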
+        audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(numChannels);
 
         // do not create a new audio track if current track is compatible with sample parameters
 #ifdef USE_SHARED_MEM_BUFFER
         newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
-                channels, sample->getIMemory(), AUDIO_OUTPUT_FLAG_FAST, callback, userData);
+                channelMask, sample->getIMemory(), AUDIO_OUTPUT_FLAG_FAST, callback, userData);
 #else
         newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
-                channels, frameCount, AUDIO_OUTPUT_FLAG_FAST, callback, userData,
+                channelMask, frameCount, AUDIO_OUTPUT_FLAG_FAST, callback, userData,
                 bufferFrames);
 #endif
         oldTrack = mAudioTrack;
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index d8b35d7..f1782cc 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -201,7 +201,16 @@
     switch (what) {
         case LiveSession::kWhatPrepared:
         {
-            notifyVideoSizeChanged(0, 0);
+            // notify the current video size if we have it; otherwise report an initial size of (0, 0)
+            sp<AMessage> format = getFormat(false /* audio */);
+            int32_t width;
+            int32_t height;
+            if (format != NULL &&
+                    format->findInt32("width", &width) && format->findInt32("height", &height)) {
+                notifyVideoSizeChanged(width, height);
+            } else {
+                notifyVideoSizeChanged(0, 0);
+            }
 
             uint32_t flags = FLAG_CAN_PAUSE;
             if (mLiveSession->isSeekable()) {
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 1adab38..e7b5caf 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -35,7 +35,9 @@
 
 #include <media/hardware/HardwareAPI.h>
 
+#include <OMX_AudioExt.h>
 #include <OMX_Component.h>
+#include <OMX_IndexExt.h>
 
 #include "include/avc_utils.h"
 
@@ -965,6 +967,10 @@
             "audio_decoder.flac", "audio_encoder.flac" },
         { MEDIA_MIMETYPE_AUDIO_MSGSM,
             "audio_decoder.gsm", "audio_encoder.gsm" },
+        { MEDIA_MIMETYPE_VIDEO_MPEG2,
+            "video_decoder.mpeg2", "video_encoder.mpeg2" },
+        { MEDIA_MIMETYPE_AUDIO_AC3,
+            "audio_decoder.ac3", "audio_encoder.ac3" },
     };
 
     static const size_t kNumMimeToRole =
@@ -1256,6 +1262,15 @@
         } else {
             err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
         }
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
+        int32_t numChannels;
+        int32_t sampleRate;
+        if (!msg->findInt32("channel-count", &numChannels)
+                || !msg->findInt32("sample-rate", &sampleRate)) {
+            err = INVALID_OPERATION;
+        } else {
+            err = setupAC3Codec(encoder, numChannels, sampleRate);
+        }
     }
 
     if (err != OK) {
@@ -1452,6 +1467,44 @@
             mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
 }
 
+status_t ACodec::setupAC3Codec(
+        bool encoder, int32_t numChannels, int32_t sampleRate) {
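+    // PCM parameters describe the uncompressed side: the input port for an encoder,
+    // the output port for a decoder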
+    status_t err = setupRawAudioFormat(
+            encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (encoder) {
+        ALOGW("AC3 encoding is not supported.");
+        return INVALID_OPERATION;
+    }
+
+    OMX_AUDIO_PARAM_ANDROID_AC3TYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = kPortIndexInput;
+
+    err = mOMX->getParameter(
+            mNode,
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+            &def,
+            sizeof(def));
+
+    if (err != OK) {
+        return err;
+    }
+
+    def.nChannels = numChannels;
+    def.nSampleRate = sampleRate;
+
+    return mOMX->setParameter(
+            mNode,
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+            &def,
+            sizeof(def));
+}
+
 static OMX_AUDIO_AMRBANDMODETYPE pickModeFromBitRate(
         bool isAMRWB, int32_t bps) {
     if (isAMRWB) {
@@ -2530,7 +2583,7 @@
         {
             OMX_AUDIO_PORTDEFINITIONTYPE *audioDef = &def.format.audio;
 
-            switch (audioDef->eEncoding) {
+            switch ((int)audioDef->eEncoding) {
                 case OMX_AUDIO_CodingPCM:
                 {
                     OMX_AUDIO_PARAM_PCMMODETYPE params;
@@ -2636,6 +2689,24 @@
                     break;
                 }
 
+                case OMX_AUDIO_CodingAndroidAC3:
+                {
+                    OMX_AUDIO_PARAM_ANDROID_AC3TYPE params;
+                    InitOMXParams(&params);
+                    params.nPortIndex = kPortIndexOutput;
+
+                    CHECK_EQ((status_t)OK, mOMX->getParameter(
+                            mNode,
+                            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+                            &params,
+                            sizeof(params)));
+
+                    notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AC3);
+                    notify->setInt32("channel-count", params.nChannels);
+                    notify->setInt32("sample-rate", params.nSampleRate);
+                    break;
+                }
+
                 default:
                     TRESPASS();
             }
@@ -3072,11 +3143,16 @@
         /* these are unfilled buffers returned by client */
         CHECK(msg->findInt32("err", &err));
 
-        ALOGV("[%s] saw error %d instead of an input buffer",
-             mCodec->mComponentName.c_str(), err);
+        if (err == OK) {
+            /* buffers with no errors are returned on MediaCodec.flush */
+            mode = KEEP_BUFFERS;
+        } else {
+            ALOGV("[%s] saw error %d instead of an input buffer",
+                 mCodec->mComponentName.c_str(), err);
+            eos = true;
+        }
 
         buffer.clear();
-        mode = KEEP_BUFFERS;
     }
 
     int32_t tmp;
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 52bd896..e1f6563 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -606,6 +606,9 @@
 
     mWatchForAudioSeekComplete = false;
     mWatchForAudioEOS = false;
+
+    mMediaRenderingStartGeneration = 0;
+    mStartGeneration = 0;
 }
 
 void AwesomePlayer::notifyListener_l(int msg, int ext1, int ext2) {
@@ -895,6 +898,8 @@
         return OK;
     }
 
+    mMediaRenderingStartGeneration = ++mStartGeneration;
+
     if (!(mFlags & PREPARED)) {
         status_t err = prepare_l();
 
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index e299caf..8af1aaf 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1506,7 +1506,8 @@
             info->mOwnedByClient = false;
 
             if (portIndex == kPortIndexInput) {
-                msg->setInt32("err", ERROR_END_OF_STREAM);
+                /* no error, just returning buffers */
+                msg->setInt32("err", OK);
             }
             msg->post();
         }
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index b5d4e44..340cba7 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -42,6 +42,7 @@
 const char *MEDIA_MIMETYPE_AUDIO_FLAC = "audio/flac";
 const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS = "audio/aac-adts";
 const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
+const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
 
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
 const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 7f56af8..063ab49 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -40,7 +40,9 @@
 #include <utils/Vector.h>
 
 #include <OMX_Audio.h>
+#include <OMX_AudioExt.h>
 #include <OMX_Component.h>
+#include <OMX_IndexExt.h>
 
 #include "include/avc_utils.h"
 
@@ -533,6 +535,17 @@
                     sampleRate,
                     numChannels);
         }
+    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AC3, mMIME)) {
+        int32_t numChannels;
+        int32_t sampleRate;
+        CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+        CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+
+        status_t err = setAC3Format(numChannels, sampleRate);
+        if (err != OK) {
+            CODEC_LOGE("setAC3Format() failed (err = %d)", err);
+            return err;
+        }
     } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_ALAW, mMIME)
             || !strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_MLAW, mMIME)) {
         // These are PCM-like formats with a fixed sample rate but
@@ -1400,6 +1413,10 @@
             "audio_decoder.flac", "audio_encoder.flac" },
         { MEDIA_MIMETYPE_AUDIO_MSGSM,
             "audio_decoder.gsm", "audio_encoder.gsm" },
+        { MEDIA_MIMETYPE_VIDEO_MPEG2,
+            "video_decoder.mpeg2", "video_encoder.mpeg2" },
+        { MEDIA_MIMETYPE_AUDIO_AC3,
+            "audio_decoder.ac3", "audio_encoder.ac3" },
     };
 
     static const size_t kNumMimeToRole =
@@ -3495,6 +3512,31 @@
     return OK;
 }
 
+status_t OMXCodec::setAC3Format(int32_t numChannels, int32_t sampleRate) {
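+    // read-modify-write the component's AC3 parameters on the input port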
+    OMX_AUDIO_PARAM_ANDROID_AC3TYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = kPortIndexInput;
+
+    status_t err = mOMX->getParameter(
+            mNode,
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+            &def,
+            sizeof(def));
+
+    if (err != OK) {
+        return err;
+    }
+
+    def.nChannels = numChannels;
+    def.nSampleRate = sampleRate;
+
+    return mOMX->setParameter(
+            mNode,
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+            &def,
+            sizeof(def));
+}
+
 void OMXCodec::setG711Format(int32_t numChannels) {
     CHECK(!mIsEncoder);
     setRawAudioFormat(kPortIndexInput, 8000, numChannels);
@@ -4428,6 +4470,17 @@
                 mOutputFormat->setInt32(kKeyChannelCount, numChannels);
                 mOutputFormat->setInt32(kKeySampleRate, sampleRate);
                 mOutputFormat->setInt32(kKeyBitRate, bitRate);
+            } else if (audio_def->eEncoding ==
+                    (OMX_AUDIO_CODINGTYPE)OMX_AUDIO_CodingAndroidAC3) {
+                mOutputFormat->setCString(
+                        kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
+                int32_t numChannels, sampleRate, bitRate;
+                inputFormat->findInt32(kKeyChannelCount, &numChannels);
+                inputFormat->findInt32(kKeySampleRate, &sampleRate);
+                inputFormat->findInt32(kKeyBitRate, &bitRate);
+                mOutputFormat->setInt32(kKeyChannelCount, numChannels);
+                mOutputFormat->setInt32(kKeySampleRate, sampleRate);
+                mOutputFormat->setInt32(kKeyBitRate, bitRate);
             } else {
                 CHECK(!"Should not be here. Unknown audio encoding.");
             }
diff --git a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
index 1d66120..4a21a3e 100644
--- a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
+++ b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
@@ -871,7 +871,13 @@
         CHECK(encoderStatus == AVCENC_SUCCESS || encoderStatus == AVCENC_NEW_IDR);
         dataLength = outHeader->nAllocLen;  // Reset the output buffer length
         if (inHeader->nFilledLen > 0) {
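+            // prepend a 4-byte Annex B start code when the buffer has room for it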
+            if (outHeader->nAllocLen >= 4) {
+                memcpy(outPtr, "\x00\x00\x00\x01", 4);
+                outPtr += 4;
+                dataLength -= 4;
+            }
             encoderStatus = PVAVCEncodeNAL(mHandle, outPtr, &dataLength, &type);
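+            // report the total length from the start of the buffer, start code included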
+            dataLength = outPtr + dataLength - outHeader->pBuffer;
             if (encoderStatus == AVCENC_SUCCESS) {
                 CHECK(NULL == PVAVCEncGetOverrunBuffer(mHandle));
             } else if (encoderStatus == AVCENC_PICTURE_READY) {
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 9850a46..f87b9da 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -506,6 +506,11 @@
                     ElementaryStreamQueue::PCM_AUDIO);
             break;
 
+        case STREAMTYPE_AC3:
+            mQueue = new ElementaryStreamQueue(
+                    ElementaryStreamQueue::AC3);
+            break;
+
         default:
             break;
     }
@@ -614,6 +619,7 @@
         case STREAMTYPE_MPEG2_AUDIO:
         case STREAMTYPE_MPEG2_AUDIO_ADTS:
         case STREAMTYPE_PCM_AUDIO:
+        case STREAMTYPE_AC3:
             return true;
 
         default:
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index a10edc9..d4e30b4 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -88,6 +88,10 @@
         STREAMTYPE_MPEG2_AUDIO_ADTS     = 0x0f,
         STREAMTYPE_MPEG4_VIDEO          = 0x10,
         STREAMTYPE_H264                 = 0x1b,
+
+        // From ATSC A/53 Part 3:2009, 6.7.1
+        STREAMTYPE_AC3                  = 0x81,
+
         STREAMTYPE_PCM_AUDIO            = 0x83,
     };
 
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 8f9c9c8..ea79885 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -56,6 +56,122 @@
     }
 }
 
+// Parse an AC3 header, assuming ptr points to the start of a syncframe;
+// update the metadata if applicable, and return the payload size
+static unsigned parseAC3SyncFrame(
+        const uint8_t *ptr, size_t size, sp<MetaData> *metaData) {
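+    // channel counts indexed by acmod; the LFE channel is accounted for separately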
+    static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+    static const unsigned samplingRateTable[] = {48000, 44100, 32000};
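+    // nominal bit rates in kbps, indexed by frmsizecod >> 1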
+    static const unsigned rates[] = {32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256,
+            320, 384, 448, 512, 576, 640};
+
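+    // frame sizes in 16-bit words, indexed by [frmsizecod >> 1][fscod]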
+    static const unsigned frameSizeTable[19][3] = {
+        { 64, 69, 96 },
+        { 80, 87, 120 },
+        { 96, 104, 144 },
+        { 112, 121, 168 },
+        { 128, 139, 192 },
+        { 160, 174, 240 },
+        { 192, 208, 288 },
+        { 224, 243, 336 },
+        { 256, 278, 384 },
+        { 320, 348, 480 },
+        { 384, 417, 576 },
+        { 448, 487, 672 },
+        { 512, 557, 768 },
+        { 640, 696, 960 },
+        { 768, 835, 1152 },
+        { 896, 975, 1344 },
+        { 1024, 1114, 1536 },
+        { 1152, 1253, 1728 },
+        { 1280, 1393, 1920 },
+    };
+
+    ABitReader bits(ptr, size);
+    if (bits.numBitsLeft() < 16) {
+        return 0;
+    }
+    if (bits.getBits(16) != 0x0B77) {
+        return 0;
+    }
+
+    if (bits.numBitsLeft() < 16 + 2 + 6 + 5 + 3 + 3) {
+        ALOGV("Not enough bits left for further parsing");
+        return 0;
+    }
+    bits.skipBits(16);  // crc1
+
+    unsigned fscod = bits.getBits(2);
+    if (fscod == 3) {
+        ALOGW("Incorrect fscod in AC3 header");
+        return 0;
+    }
+
+    unsigned frmsizecod = bits.getBits(6);
+    if (frmsizecod > 37) {
+        ALOGW("Incorrect frmsizecod in AC3 header");
+        return 0;
+    }
+
+    unsigned bsid = bits.getBits(5);
+    if (bsid > 8) {
+        ALOGW("Incorrect bsid in AC3 header. Possibly E-AC-3?");
+        return 0;
+    }
+
+    unsigned bsmod = bits.getBits(3);
+    unsigned acmod = bits.getBits(3);
+    unsigned cmixlev = 0;
+    unsigned surmixlev = 0;
+    unsigned dsurmod = 0;
+
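+    // the mix-level and Dolby-surround fields are only present for certain channel configurations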
+    if ((acmod & 1) > 0 && acmod != 1) {
+        if (bits.numBitsLeft() < 2) {
+            return 0;
+        }
+        cmixlev = bits.getBits(2);
+    }
+    if ((acmod & 4) > 0) {
+        if (bits.numBitsLeft() < 2) {
+            return 0;
+        }
+        surmixlev = bits.getBits(2);
+    }
+    if (acmod == 2) {
+        if (bits.numBitsLeft() < 2) {
+            return 0;
+        }
+        dsurmod = bits.getBits(2);
+    }
+
+    if (bits.numBitsLeft() < 1) {
+        return 0;
+    }
+    unsigned lfeon = bits.getBits(1);
+
+    unsigned samplingRate = samplingRateTable[fscod];
+    unsigned payloadSize = frameSizeTable[frmsizecod >> 1][fscod];
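+    // at 44.1 kHz, odd frmsizecod values are one 16-bit word larger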
+    if (fscod == 1) {
+        payloadSize += frmsizecod & 1;
+    }
+    payloadSize <<= 1;  // convert from 16-bit words to bytes
+
+    unsigned channelCount = channelCountTable[acmod] + lfeon;
+
+    if (metaData != NULL) {
+        (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
+        (*metaData)->setInt32(kKeyChannelCount, channelCount);
+        (*metaData)->setInt32(kKeySampleRate, samplingRate);
+    }
+
+    return payloadSize;
+}
+
+static bool IsSeeminglyValidAC3Header(const uint8_t *ptr, size_t size) {
+    return parseAC3SyncFrame(ptr, size, NULL) > 0;
+}
+
 static bool IsSeeminglyValidADTSHeader(const uint8_t *ptr, size_t size) {
     if (size < 3) {
         // Not enough data to verify header.
@@ -224,6 +340,33 @@
                 break;
             }
 
+            case AC3:
+            {
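+                // resynchronize to the first plausible AC3 syncframe in the buffer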
+                uint8_t *ptr = (uint8_t *)data;
+
+                ssize_t startOffset = -1;
+                for (size_t i = 0; i < size; ++i) {
+                    if (IsSeeminglyValidAC3Header(&ptr[i], size - i)) {
+                        startOffset = i;
+                        break;
+                    }
+                }
+
+                if (startOffset < 0) {
+                    return ERROR_MALFORMED;
+                }
+
+                if (startOffset > 0) {
+                    ALOGI("found something resembling an AC3 syncword at "
+                          "offset %d",
+                          startOffset);
+                }
+
+                data = &ptr[startOffset];
+                size -= startOffset;
+                break;
+            }
+
             case MPEG_AUDIO:
             {
                 uint8_t *ptr = (uint8_t *)data;
@@ -328,6 +471,8 @@
             return dequeueAccessUnitH264();
         case AAC:
             return dequeueAccessUnitAAC();
+        case AC3:
+            return dequeueAccessUnitAC3();
         case MPEG_VIDEO:
             return dequeueAccessUnitMPEGVideo();
         case MPEG4_VIDEO:
@@ -340,6 +485,51 @@
     }
 }
 
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAC3() {
+    unsigned syncStartPos = 0;  // in bytes
+    unsigned payloadSize = 0;
+    sp<MetaData> format = new MetaData;
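+    // scan forward until a complete, valid syncframe header is found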
+    while (true) {
+        if (syncStartPos + 2 >= mBuffer->size()) {
+            return NULL;
+        }
+
+        payloadSize = parseAC3SyncFrame(
+                mBuffer->data() + syncStartPos,
+                mBuffer->size() - syncStartPos,
+                &format);
+        if (payloadSize > 0) {
+            break;
+        }
+        ++syncStartPos;
+    }
+
+    if (mBuffer->size() < syncStartPos + payloadSize) {
+        ALOGV("Not enough buffer size for AC3");
+        return NULL;
+    }
+
+    if (mFormat == NULL) {
+        mFormat = format;
+    }
+
+    sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
+    memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
+
+    int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
+    CHECK_GE(timeUs, 0ll);
+    accessUnit->meta()->setInt64("timeUs", timeUs);
+
+    memmove(
+            mBuffer->data(),
+            mBuffer->data() + syncStartPos + payloadSize,
+            mBuffer->size() - syncStartPos - payloadSize);
+
+    mBuffer->setRange(0, mBuffer->size() - syncStartPos - payloadSize);
+
+    return accessUnit;
+}
+
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitPCMAudio() {
     if (mBuffer->size() < 4) {
         return NULL;
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index 66a8087..a2cca77 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -32,6 +32,7 @@
     enum Mode {
         H264,
         AAC,
+        AC3,
         MPEG_AUDIO,
         MPEG_VIDEO,
         MPEG4_VIDEO,
@@ -67,6 +68,7 @@
 
     sp<ABuffer> dequeueAccessUnitH264();
     sp<ABuffer> dequeueAccessUnitAAC();
+    sp<ABuffer> dequeueAccessUnitAC3();
     sp<ABuffer> dequeueAccessUnitMPEGAudio();
     sp<ABuffer> dequeueAccessUnitMPEGVideo();
     sp<ABuffer> dequeueAccessUnitMPEG4Video();
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index f295e5a..a7c5317 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -148,6 +148,18 @@
     }
 }
 
+void GraphicBufferSource::omxIdle() {
+    ALOGV("omxIdle");
+
+    Mutex::Autolock autoLock(mMutex);
+
+    if (mExecuting) {
+        // We are only interested in the transition from executing->idle,
+        // not loaded->idle.
+        mEndOfStream = mEndOfStreamSent = true;
+    }
+}
+
 void GraphicBufferSource::omxLoaded(){
     Mutex::Autolock autoLock(mMutex);
     if (!mExecuting) {
@@ -213,7 +225,12 @@
     // see if the GraphicBuffer reference was null, which should only ever
     // happen for EOS.
     if (codecBuffer.mGraphicBuffer == NULL) {
-        CHECK(mEndOfStream && mEndOfStreamSent);
+        if (!(mEndOfStream && mEndOfStreamSent)) {
+            // This can happen when broken code sends us the same buffer
+            // twice in a row.
+            ALOGE("ERROR: codecBufferEmptied on non-EOS null buffer "
+                    "(buffer emptied twice?)");
+        }
         // No GraphicBuffer to deal with, no additional input or output is
         // expected, so just return.
         return;
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 244a843..9e5eee6 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -69,6 +69,11 @@
     // sitting in the BufferQueue, this will send them to the codec.
     void omxExecuting();
 
+    // This is called when OMX transitions to OMX_StateIdle, indicating that
+    // the codec is meant to return all buffers back to the client for them
+    // to be freed. Do NOT submit any more buffers to the component.
+    void omxIdle();
+
     // This is called when OMX transitions to OMX_StateLoaded, indicating that
     // we are shutting down.
     void omxLoaded();
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 46e5d71..5f104fc 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -243,13 +243,18 @@
 status_t OMXNodeInstance::sendCommand(
         OMX_COMMANDTYPE cmd, OMX_S32 param) {
     const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
-    if (bufferSource != NULL
-            && cmd == OMX_CommandStateSet
-            && param == OMX_StateLoaded) {
-        // Initiating transition from Executing -> Loaded
-        // Buffers are about to be freed.
-        bufferSource->omxLoaded();
-        setGraphicBufferSource(NULL);
+    if (bufferSource != NULL && cmd == OMX_CommandStateSet) {
+        if (param == OMX_StateIdle) {
+            // Initiating transition from Executing -> Idle
+            // ACodec is waiting for all buffers to be returned, do NOT
+            // submit any more buffers to the codec.
+            bufferSource->omxIdle();
+        } else if (param == OMX_StateLoaded) {
+            // Initiating transition from Idle/Executing -> Loaded
+            // Buffers are about to be freed.
+            bufferSource->omxLoaded();
+            setGraphicBufferSource(NULL);
+        }
 
         // fall through
     }
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 360db4f..79c41f7 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -2340,6 +2340,7 @@
                                         strategy,
                                         sessionId,
                                         effect->id());
+            AudioSystem::setEffectEnabled(effect->id(), effect->isEnabled());
         }
         effect = chain->getEffectFromId_l(0);
     }
@@ -2354,6 +2355,7 @@
                                             strategy,
                                             sessionId,
                                             removed[i]->id());
+                AudioSystem::setEffectEnabled(removed[i]->id(), removed[i]->isEnabled());
             }
         }
     }
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index d244c14..9137040 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -499,7 +499,7 @@
     private:
         const char * const mModuleName;
         audio_hw_device_t * const mHwDevice;
-        Flags mFlags;
+        const Flags mFlags;
     };
 
     // AudioStreamOut and AudioStreamIn are immutable, so their fields are const.
@@ -509,7 +509,7 @@
     struct AudioStreamOut {
         AudioHwDevice* const audioHwDev;
         audio_stream_out_t* const stream;
-        audio_output_flags_t flags;
+        const audio_output_flags_t flags;
 
         audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
 
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 8bd04c6..8bea752 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -58,7 +58,7 @@
 status_t AudioMixer::DownmixerBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
         int64_t pts) {
     //ALOGV("DownmixerBufferProvider::getNextBuffer()");
-    if (this->mTrackBufferProvider != NULL) {
+    if (mTrackBufferProvider != NULL) {
         status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
         if (res == OK) {
             mDownmixConfig.inputCfg.buffer.frameCount = pBuffer->frameCount;
@@ -81,7 +81,7 @@
 
 void AudioMixer::DownmixerBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
     //ALOGV("DownmixerBufferProvider::releaseBuffer()");
-    if (this->mTrackBufferProvider != NULL) {
+    if (mTrackBufferProvider != NULL) {
         mTrackBufferProvider->releaseBuffer(pBuffer);
     } else {
         ALOGE("DownmixerBufferProvider::releaseBuffer() error: NULL track buffer provider");
@@ -90,9 +90,9 @@
 
 
 // ----------------------------------------------------------------------------
-bool AudioMixer::isMultichannelCapable = false;
+bool AudioMixer::sIsMultichannelCapable = false;
 
-effect_descriptor_t AudioMixer::dwnmFxDesc;
+effect_descriptor_t AudioMixer::sDwnmFxDesc;
 
 // Ensure mConfiguredNames bitmask is initialized properly on all architectures.
 // The value of 1 << x is undefined in C when x >= 32.
@@ -113,8 +113,6 @@
     // AudioMixer is not yet capable of multi-channel output beyond stereo
     ALOG_ASSERT(2 == MAX_NUM_CHANNELS, "bad MAX_NUM_CHANNELS %d", MAX_NUM_CHANNELS);
 
-    LocalClock lc;
-
     pthread_once(&sOnceControl, &sInitRoutine);
 
     mState.enabledTracks= 0;
@@ -136,27 +134,6 @@
         t++;
     }
 
-    // find multichannel downmix effect if we have to play multichannel content
-    uint32_t numEffects = 0;
-    int ret = EffectQueryNumberEffects(&numEffects);
-    if (ret != 0) {
-        ALOGE("AudioMixer() error %d querying number of effects", ret);
-        return;
-    }
-    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
-
-    for (uint32_t i = 0 ; i < numEffects ; i++) {
-        if (EffectQueryEffect(i, &dwnmFxDesc) == 0) {
-            ALOGV("effect %d is called %s", i, dwnmFxDesc.name);
-            if (memcmp(&dwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
-                ALOGI("found effect \"%s\" from %s",
-                        dwnmFxDesc.name, dwnmFxDesc.implementor);
-                isMultichannelCapable = true;
-                break;
-            }
-        }
-    }
-    ALOGE_IF(!isMultichannelCapable, "unable to find downmix effect");
 }
 
 AudioMixer::~AudioMixer()
@@ -276,13 +253,13 @@
     DownmixerBufferProvider* pDbp = new DownmixerBufferProvider();
     int32_t status;
 
-    if (!isMultichannelCapable) {
+    if (!sIsMultichannelCapable) {
         ALOGE("prepareTrackForDownmix(%d) fails: mixer doesn't support multichannel content",
                 trackName);
         goto noDownmixForActiveTrack;
     }
 
-    if (EffectCreate(&dwnmFxDesc.uuid,
+    if (EffectCreate(&sDwnmFxDesc.uuid,
             pTrack->sessionId /*sessionId*/, -2 /*ioId not relevant here, using random value*/,
             &pDbp->mDownmixHandle/*pHandle*/) != 0) {
         ALOGE("prepareTrackForDownmix(%d) fails: error creating downmixer effect", trackName);
@@ -667,27 +644,29 @@
         countActiveTracks++;
         track_t& t = state->tracks[i];
         uint32_t n = 0;
+        // FIXME can overflow (mask is only 3 bits)
         n |= NEEDS_CHANNEL_1 + t.channelCount - 1;
-        n |= NEEDS_FORMAT_16;
-        n |= t.doesResample() ? NEEDS_RESAMPLE_ENABLED : NEEDS_RESAMPLE_DISABLED;
+        if (t.doesResample()) {
+            n |= NEEDS_RESAMPLE;
+        }
         if (t.auxLevel != 0 && t.auxBuffer != NULL) {
-            n |= NEEDS_AUX_ENABLED;
+            n |= NEEDS_AUX;
         }
 
         if (t.volumeInc[0]|t.volumeInc[1]) {
             volumeRamp = true;
         } else if (!t.doesResample() && t.volumeRL == 0) {
-            n |= NEEDS_MUTE_ENABLED;
+            n |= NEEDS_MUTE;
         }
         t.needs = n;
 
-        if ((n & NEEDS_MUTE__MASK) == NEEDS_MUTE_ENABLED) {
+        if (n & NEEDS_MUTE) {
             t.hook = track__nop;
         } else {
-            if ((n & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED) {
+            if (n & NEEDS_AUX) {
                 all16BitsStereoNoResample = false;
             }
-            if ((n & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
+            if (n & NEEDS_RESAMPLE) {
                 all16BitsStereoNoResample = false;
                 resampling = true;
                 t.hook = track__genericResample;
@@ -753,7 +732,7 @@
             en &= ~(1<<i);
             track_t& t = state->tracks[i];
             if (!t.doesResample() && t.volumeRL == 0) {
-                t.needs |= NEEDS_MUTE_ENABLED;
+                t.needs |= NEEDS_MUTE;
                 t.hook = track__nop;
             } else {
                 allMuted = false;
@@ -1157,7 +1136,7 @@
                 track_t& t = state->tracks[i];
                 size_t outFrames = BLOCKSIZE;
                 int32_t *aux = NULL;
-                if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) {
+                if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
                     aux = t.auxBuffer + numFrames;
                 }
                 while (outFrames) {
@@ -1238,14 +1217,14 @@
             e1 &= ~(1<<i);
             track_t& t = state->tracks[i];
             int32_t *aux = NULL;
-            if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) {
+            if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
                 aux = t.auxBuffer;
             }
 
             // this is a little goofy, on the resampling case we don't
             // acquire/release the buffers because it's done by
             // the resampler.
-            if ((t.needs & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
+            if (t.needs & NEEDS_RESAMPLE) {
                 t.resampler->setPTS(pts);
                 t.hook(&t, outTemp, numFrames, state->resampleTemp, aux);
             } else {
@@ -1459,6 +1438,28 @@
 {
     LocalClock lc;
     sLocalTimeFreq = lc.getLocalFreq();
+
+    // find multichannel downmix effect if we have to play multichannel content
+    uint32_t numEffects = 0;
+    int ret = EffectQueryNumberEffects(&numEffects);
+    if (ret != 0) {
+        ALOGE("AudioMixer() error %d querying number of effects", ret);
+        return;
+    }
+    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
+
+    for (uint32_t i = 0 ; i < numEffects ; i++) {
+        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
+            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
+            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+                ALOGI("found effect \"%s\" from %s",
+                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
+                sIsMultichannelCapable = true;
+                break;
+            }
+        }
+    }
+    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
 }
 
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 43aeb86..d5c9da7 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -120,27 +120,19 @@
 private:
 
     enum {
+        // FIXME this representation permits up to 8 channels
         NEEDS_CHANNEL_COUNT__MASK   = 0x00000007,
-        NEEDS_FORMAT__MASK          = 0x000000F0,
-        NEEDS_MUTE__MASK            = 0x00000100,
-        NEEDS_RESAMPLE__MASK        = 0x00001000,
-        NEEDS_AUX__MASK             = 0x00010000,
     };
 
     enum {
-        NEEDS_CHANNEL_1             = 0x00000000,
-        NEEDS_CHANNEL_2             = 0x00000001,
+        NEEDS_CHANNEL_1             = 0x00000000,   // mono
+        NEEDS_CHANNEL_2             = 0x00000001,   // stereo
 
-        NEEDS_FORMAT_16             = 0x00000010,
+        // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
 
-        NEEDS_MUTE_DISABLED         = 0x00000000,
-        NEEDS_MUTE_ENABLED          = 0x00000100,
-
-        NEEDS_RESAMPLE_DISABLED     = 0x00000000,
-        NEEDS_RESAMPLE_ENABLED      = 0x00001000,
-
-        NEEDS_AUX_DISABLED     = 0x00000000,
-        NEEDS_AUX_ENABLED      = 0x00010000,
+        NEEDS_MUTE                  = 0x00000100,
+        NEEDS_RESAMPLE              = 0x00001000,
+        NEEDS_AUX                   = 0x00010000,
     };
 
     struct state_t;
@@ -256,9 +248,9 @@
     state_t         mState __attribute__((aligned(32)));
 
     // effect descriptor for the downmixer used by the mixer
-    static effect_descriptor_t dwnmFxDesc;
+    static effect_descriptor_t sDwnmFxDesc;
     // indicates whether a downmix effect has been found and is usable by this mixer
-    static bool                isMultichannelCapable;
+    static bool                sIsMultichannelCapable;
 
     // Call after changing either the enabled status of a track, or parameters of an enabled track.
     // OK to call more often than that, but unnecessary.
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index c14d4b4..7474f1a 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3683,6 +3683,9 @@
                 if (--(track->mRetryCount) <= 0) {
                     ALOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
                     tracksToRemove->add(track);
+                    // indicate to client process that the track was disabled because of underrun;
+                    // it will then automatically call start() when data is available
+                    android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
                 } else if (last) {
                     mixerStatus = MIXER_TRACKS_ENABLED;
                 }
@@ -4104,6 +4107,9 @@
                     ALOGV("OffloadThread: BUFFER TIMEOUT: remove(%d) from active list",
                           track->name());
                     tracksToRemove->add(track);
+                    // indicate to client process that the track was disabled because of underrun;
+                    // it will then automatically call start() when data is available
+                    android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
                 } else if (last){
                     mixerStatus = MIXER_TRACKS_ENABLED;
                 }
@@ -4156,15 +4162,15 @@
 // must be called with thread mutex locked
 bool AudioFlinger::OffloadThread::shouldStandby_l()
 {
-    bool TrackPaused = false;
+    bool trackPaused = false;
 
     // do not put the HAL in standby when paused. AwesomePlayer clears the offloaded AudioTrack
     // after a timeout and we will enter standby then.
     if (mTracks.size() > 0) {
-        TrackPaused = mTracks[mTracks.size() - 1]->isPaused();
+        trackPaused = mTracks[mTracks.size() - 1]->isPaused();
     }
 
-    return !mStandby && !TrackPaused;
+    return !mStandby && !trackPaused;
 }
 
 
@@ -4249,6 +4255,7 @@
     for (size_t i = 0; i < outputTracks.size(); i++) {
         outputTracks[i]->write(mMixBuffer, writeFrames);
     }
+    mStandby = false;
     return (ssize_t)mixBufferSize;
 }
 
@@ -4400,8 +4407,6 @@
 
 bool AudioFlinger::RecordThread::threadLoop()
 {
-    AudioBufferProvider::Buffer buffer;
-
     nsecs_t lastWarning = 0;
 
     inputStandBy();
@@ -4445,7 +4450,7 @@
             // make a stable copy of mActiveTrack
             activeTrack = mActiveTrack;
             if (activeTrack == 0) {
-                standby();
+                standbyIfNotAlreadyInStandby();
                 // exitPending() can't become true here
                 releaseWakeLock_l();
                 ALOGV("RecordThread: loop stopping");
@@ -4465,7 +4470,7 @@
             activeTrackState = activeTrack->mState;
             switch (activeTrackState) {
             case TrackBase::PAUSING:
-                standby();
+                standbyIfNotAlreadyInStandby();
                 mActiveTrack.clear();
                 mStartStopCond.broadcast();
                 doSleep = true;
@@ -4511,6 +4516,7 @@
             effectChains[i]->process_l();
         }
 
+        AudioBufferProvider::Buffer buffer;
         buffer.frameCount = mFrameCount;
         status_t status = activeTrack->getNextBuffer(&buffer);
         if (status == NO_ERROR) {
@@ -4642,7 +4648,7 @@
         // effectChains doesn't need to be cleared, since it is cleared by destructor at scope end
     }
 
-    standby();
+    standbyIfNotAlreadyInStandby();
 
     {
         Mutex::Autolock _l(mLock);
@@ -4660,7 +4666,7 @@
     return false;
 }
 
-void AudioFlinger::RecordThread::standby()
+void AudioFlinger::RecordThread::standbyIfNotAlreadyInStandby()
 {
     if (!mStandby) {
         inputStandBy();
@@ -5207,7 +5213,7 @@
 
 void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) {
     AudioSystem::OutputDescriptor desc;
-    void *param2 = NULL;
+    const void *param2 = NULL;
 
     switch (event) {
     case AudioSystem::INPUT_OPENED:
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 8a859f5..cbc3827 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -931,7 +931,7 @@
             void    clearSyncStartEvent();
 
             // Enter standby if not already in standby, and set mStandby flag
-            void    standby();
+            void    standbyIfNotAlreadyInStandby();
 
             // Call the HAL standby method unconditionally, and don't change mStandby flag
             void    inputStandBy();
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 9152ea3..d5178b1 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -861,6 +861,7 @@
                                         dstChain->strategy(),
                                         AUDIO_SESSION_OUTPUT_MIX,
                                         effect->id());
+            AudioSystem::setEffectEnabled(effect->id(), effect->isEnabled());
         }
         status = playbackThread->attachAuxEffect(this, EffectId);
     }
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index d23f8b9..51ba698 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -35,6 +35,7 @@
     device3/Camera3ZslStream.cpp \
     device3/StatusTracker.cpp \
     gui/RingBufferConsumer.cpp \
+    utils/CameraTraces.cpp \
 
 LOCAL_SHARED_LIBRARIES:= \
     libui \
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 34a5b15..eeedfc9 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -42,6 +42,7 @@
 #include "api1/Camera2Client.h"
 #include "api_pro/ProCamera2Client.h"
 #include "api2/CameraDeviceClient.h"
+#include "utils/CameraTraces.h"
 #include "CameraDeviceFactory.h"
 
 namespace android {
@@ -1219,6 +1220,10 @@
 
         if (locked) mServiceLock.unlock();
 
+        // Dump camera traces if there were any
+        write(fd, "\n", 1);
+        camera3::CameraTraces::dump(fd, args);
+
         // change logging level
         int n = args.size();
         for (int i = 0; i + 1 < n; i++) {
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index df3b162..0b6ca5c 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -76,13 +76,15 @@
         return res;
     }
 
-    SharedParameters::Lock l(mParameters);
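+    // hold the parameters lock only while building the defaults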
+    {
+        SharedParameters::Lock l(mParameters);
 
-    res = l.mParameters.initialize(&(mDevice->info()));
-    if (res != OK) {
-        ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
-                __FUNCTION__, mCameraId, strerror(-res), res);
-        return NO_INIT;
+        res = l.mParameters.initialize(&(mDevice->info()));
+        if (res != OK) {
+            ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return NO_INIT;
+        }
     }
 
     String8 threadName;
@@ -135,6 +137,7 @@
     mCallbackProcessor->run(threadName.string());
 
     if (gLogLevel >= 1) {
+        SharedParameters::Lock l(mParameters);
         ALOGD("%s: Default parameters converted from camera %d:", __FUNCTION__,
               mCameraId);
         ALOGD("%s", l.mParameters.paramsFlattened.string());
@@ -353,6 +356,10 @@
         result.appendFormat("    meteringCropRegion\n");
         haveQuirk = true;
     }
+    if (p.quirks.partialResults) {
+        result.appendFormat("    usePartialResult\n");
+        haveQuirk = true;
+    }
     if (!haveQuirk) {
         result.appendFormat("    none\n");
     }
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index c34cb12..19acae4 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -29,13 +29,27 @@
 namespace camera2 {
 
 FrameProcessor::FrameProcessor(wp<CameraDeviceBase> device,
-                               wp<Camera2Client> client) :
+                               sp<Camera2Client> client) :
     FrameProcessorBase(device),
     mClient(client),
-    mLastFrameNumberOfFaces(0) {
+    mLastFrameNumberOfFaces(0),
+    mLast3AFrameNumber(-1) {
 
     sp<CameraDeviceBase> d = device.promote();
     mSynthesize3ANotify = !(d->willNotify3A());
+
+    {
+        SharedParameters::Lock l(client->getParameters());
+        mUsePartialQuirk = l.mParameters.quirks.partialResults;
+
+        // Initialize starting 3A state
+        m3aState.afTriggerId = l.mParameters.afTriggerCounter;
+        m3aState.aeTriggerId = l.mParameters.precaptureTriggerCounter;
+        // Check if lens is fixed-focus
+        if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED) {
+            m3aState.afMode = ANDROID_CONTROL_AF_MODE_OFF;
+        }
+    }
 }
 
 FrameProcessor::~FrameProcessor() {
@@ -49,20 +63,25 @@
         return false;
     }
 
-    if (processFaceDetect(frame, client) != OK) {
+    bool partialResult = false;
+    if (mUsePartialQuirk) {
+        camera_metadata_entry_t entry;
+        entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+        if (entry.count > 0 &&
+                entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+            partialResult = true;
+        }
+    }
+
+    if (!partialResult && processFaceDetect(frame, client) != OK) {
         return false;
     }
 
     if (mSynthesize3ANotify) {
-        // Ignoring missing fields for now
         process3aState(frame, client);
     }
 
-    if (!FrameProcessorBase::processSingleFrame(frame, device)) {
-        return false;
-    }
-
-    return true;
+    return FrameProcessorBase::processSingleFrame(frame, device);
 }
 
 status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
@@ -198,86 +217,75 @@
 
     ATRACE_CALL();
     camera_metadata_ro_entry_t entry;
-    int mId = client->getCameraId();
+    int cameraId = client->getCameraId();
 
     entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
     int32_t frameNumber = entry.data.i32[0];
 
+    // Don't send 3A notifications for the same frame number twice
+    if (frameNumber <= mLast3AFrameNumber) {
+        ALOGV("%s: Already sent 3A for frame number %d, skipping",
+                __FUNCTION__, frameNumber);
+        return OK;
+    }
+
+    mLast3AFrameNumber = frameNumber;
+
     // Get 3A states from result metadata
     bool gotAllStates = true;
 
     AlgState new3aState;
 
-    entry = frame.find(ANDROID_CONTROL_AE_STATE);
-    if (entry.count == 0) {
-        ALOGE("%s: Camera %d: No AE state provided by HAL for frame %d!",
-                __FUNCTION__, mId, frameNumber);
-        gotAllStates = false;
-    } else {
-        new3aState.aeState =
-                static_cast<camera_metadata_enum_android_control_ae_state>(
-                    entry.data.u8[0]);
-    }
+    // TODO: Also use AE mode, AE trigger ID
 
-    entry = frame.find(ANDROID_CONTROL_AF_STATE);
-    if (entry.count == 0) {
-        ALOGE("%s: Camera %d: No AF state provided by HAL for frame %d!",
-                __FUNCTION__, mId, frameNumber);
-        gotAllStates = false;
-    } else {
-        new3aState.afState =
-                static_cast<camera_metadata_enum_android_control_af_state>(
-                    entry.data.u8[0]);
-    }
+    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_MODE,
+            &new3aState.afMode, frameNumber, cameraId);
 
-    entry = frame.find(ANDROID_CONTROL_AWB_STATE);
-    if (entry.count == 0) {
-        ALOGE("%s: Camera %d: No AWB state provided by HAL for frame %d!",
-                __FUNCTION__, mId, frameNumber);
-        gotAllStates = false;
-    } else {
-        new3aState.awbState =
-                static_cast<camera_metadata_enum_android_control_awb_state>(
-                    entry.data.u8[0]);
-    }
+    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_MODE,
+            &new3aState.awbMode, frameNumber, cameraId);
 
-    int32_t afTriggerId = 0;
-    entry = frame.find(ANDROID_CONTROL_AF_TRIGGER_ID);
-    if (entry.count == 0) {
-        ALOGE("%s: Camera %d: No AF trigger ID provided by HAL for frame %d!",
-                __FUNCTION__, mId, frameNumber);
-        gotAllStates = false;
-    } else {
-        afTriggerId = entry.data.i32[0];
-    }
+    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AE_STATE,
+            &new3aState.aeState, frameNumber, cameraId);
 
-    int32_t aeTriggerId = 0;
-    entry = frame.find(ANDROID_CONTROL_AE_PRECAPTURE_ID);
-    if (entry.count == 0) {
-        ALOGE("%s: Camera %d: No AE precapture trigger ID provided by HAL"
-                " for frame %d!",
-                __FUNCTION__, mId, frameNumber);
-        gotAllStates = false;
-    } else {
-        aeTriggerId = entry.data.i32[0];
-    }
+    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_STATE,
+            &new3aState.afState, frameNumber, cameraId);
+
+    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_STATE,
+            &new3aState.awbState, frameNumber, cameraId);
+
+    gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AF_TRIGGER_ID,
+            &new3aState.afTriggerId, frameNumber, cameraId);
+
+    gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+            &new3aState.aeTriggerId, frameNumber, cameraId);
 
     if (!gotAllStates) return BAD_VALUE;
 
     if (new3aState.aeState != m3aState.aeState) {
-        ALOGV("%s: AE state changed from 0x%x to 0x%x",
-                __FUNCTION__, m3aState.aeState, new3aState.aeState);
-        client->notifyAutoExposure(new3aState.aeState, aeTriggerId);
+        ALOGV("%s: Camera %d: AE state %d->%d",
+                __FUNCTION__, cameraId,
+                m3aState.aeState, new3aState.aeState);
+        client->notifyAutoExposure(new3aState.aeState, new3aState.aeTriggerId);
     }
-    if (new3aState.afState != m3aState.afState) {
-        ALOGV("%s: AF state changed from 0x%x to 0x%x",
-                __FUNCTION__, m3aState.afState, new3aState.afState);
-        client->notifyAutoFocus(new3aState.afState, afTriggerId);
+
+    if (new3aState.afState != m3aState.afState ||
+        new3aState.afMode != m3aState.afMode ||
+        new3aState.afTriggerId != m3aState.afTriggerId) {
+        ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
+                __FUNCTION__, cameraId,
+                m3aState.afState, new3aState.afState,
+                m3aState.afMode, new3aState.afMode,
+                m3aState.afTriggerId, new3aState.afTriggerId);
+        client->notifyAutoFocus(new3aState.afState, new3aState.afTriggerId);
     }
-    if (new3aState.awbState != m3aState.awbState) {
-        ALOGV("%s: AWB state changed from 0x%x to 0x%x",
-                __FUNCTION__, m3aState.awbState, new3aState.awbState);
-        client->notifyAutoWhitebalance(new3aState.awbState, aeTriggerId);
+    if (new3aState.awbState != m3aState.awbState ||
+        new3aState.awbMode != m3aState.awbMode) {
+        ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
+                __FUNCTION__, cameraId,
+                m3aState.awbState, new3aState.awbState,
+                m3aState.awbMode, new3aState.awbMode);
+        client->notifyAutoWhitebalance(new3aState.awbState,
+                new3aState.aeTriggerId);
     }
 
     m3aState = new3aState;
@@ -285,6 +293,39 @@
     return OK;
 }
 
+template<typename Src, typename T>
+bool FrameProcessor::get3aResult(const CameraMetadata& result, int32_t tag,
+        T* value, int32_t frameNumber, int cameraId) {
+    camera_metadata_ro_entry_t entry;
+    if (value == NULL) {
+        ALOGE("%s: Camera %d: Value to write to is NULL",
+                __FUNCTION__, cameraId);
+        return false;
+    }
+
+    entry = result.find(tag);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No %s provided by HAL for frame %d!",
+                __FUNCTION__, cameraId,
+                get_camera_metadata_tag_name(tag), frameNumber);
+        return false;
+    } else {
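+        // pick the matching union member of the metadata entry based on the source size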
+        switch (sizeof(Src)) {
+            case sizeof(uint8_t):
+                *value = static_cast<T>(entry.data.u8[0]);
+                break;
+            case sizeof(int32_t):
+                *value = static_cast<T>(entry.data.i32[0]);
+                break;
+            default:
+                ALOGE("%s: Camera %d: Unsupported source",
+                        __FUNCTION__, cameraId);
+                return false;
+        }
+    }
+    return true;
+}
+
 
 void FrameProcessor::callbackFaceDetection(sp<Camera2Client> client,
                                      const camera_frame_metadata &metadata) {
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 2a17d45..856ad32 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -39,7 +39,7 @@
  */
 class FrameProcessor : public FrameProcessorBase {
   public:
-    FrameProcessor(wp<CameraDeviceBase> device, wp<Camera2Client> client);
+    FrameProcessor(wp<CameraDeviceBase> device, sp<Camera2Client> client);
     ~FrameProcessor();
 
   private:
@@ -61,18 +61,44 @@
     status_t process3aState(const CameraMetadata &frame,
             const sp<Camera2Client> &client);
 
+    // Helper for process3aState
+    template<typename Src, typename T>
+    bool get3aResult(const CameraMetadata& result, int32_t tag, T* value,
+            int32_t frameNumber, int cameraId);
+
+
     struct AlgState {
+        // TODO: also track AE mode
+        camera_metadata_enum_android_control_af_mode   afMode;
+        camera_metadata_enum_android_control_awb_mode  awbMode;
+
         camera_metadata_enum_android_control_ae_state  aeState;
         camera_metadata_enum_android_control_af_state  afState;
         camera_metadata_enum_android_control_awb_state awbState;
 
+        int32_t                                        afTriggerId;
+        int32_t                                        aeTriggerId;
+
+        // These defaults need to match those in Parameters.cpp
         AlgState() :
+                afMode(ANDROID_CONTROL_AF_MODE_AUTO),
+                awbMode(ANDROID_CONTROL_AWB_MODE_AUTO),
                 aeState(ANDROID_CONTROL_AE_STATE_INACTIVE),
                 afState(ANDROID_CONTROL_AF_STATE_INACTIVE),
-                awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE) {
+                awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE),
+                afTriggerId(0),
+                aeTriggerId(0) {
         }
     } m3aState;
 
+    // Whether the partial result quirk is enabled for this device
+    bool mUsePartialQuirk;
+
+    // Track the most recent frame number for which 3A notifications were sent.
+    // Used to avoid sending 3A notifications for the same frame
+    // multiple times.
+    int32_t mLast3AFrameNumber;
+
     // Emit FaceDetection event to java if faces changed
     void callbackFaceDetection(sp<Camera2Client> client,
                                const camera_frame_metadata &metadata);
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 8a4e75c..1e425ba 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -1047,6 +1047,11 @@
     ALOGV_IF(quirks.meteringCropRegion, "Camera %d: Quirk meteringCropRegion"
                 " enabled", cameraId);
 
+    entry = info->find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
+    quirks.partialResults = (entry.count != 0 && entry.data.u8[0] == 1);
+    ALOGV_IF(quirks.partialResults, "Camera %d: Quirk usePartialResult"
+                " enabled", cameraId);
+
     return OK;
 }
 
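For context, the detection above expects the HAL to publish ANDROID_QUIRKS_USE_PARTIAL_RESULT as a single u8 entry with value 1 in its static info. A minimal HAL-side sketch using the camera_metadata C API (illustrative only; not part of this change). A one-byte payload is stored inline in the entry, so no extra data capacity is needed:

    uint8_t usePartialResult = 1;
    camera_metadata_t *staticInfo =
            allocate_camera_metadata(/*entry_capacity*/ 16, /*data_capacity*/ 0);
    add_camera_metadata_entry(staticInfo,
            ANDROID_QUIRKS_USE_PARTIAL_RESULT, &usePartialResult, /*count*/ 1);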
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index bcbdb99..93ab113 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -209,6 +209,7 @@
         bool triggerAfWithAuto;
         bool useZslFormat;
         bool meteringCropRegion;
+        bool partialResults;
     } quirks;
 
     /**
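On the consuming side, this flag plausibly feeds FrameProcessor::mUsePartialQuirk (declared in the FrameProcessor.h hunk above). A sketch of the constructor wiring, assuming the usual Camera2Client/SharedParameters accessors; the FrameProcessor.cpp hunk that performs this is not shown in this excerpt:

    // In FrameProcessor's constructor, copy the quirk setting once:
    {
        SharedParameters::Lock l(client->getParameters());
        mUsePartialQuirk = l.mParameters.quirks.partialResults;
    }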
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index 52906ee..b2c9b33 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -145,6 +145,15 @@
     ATRACE_CALL();
     camera_metadata_ro_entry_t entry;
 
+    // Quirks: Don't deliver partial results to listeners
+    entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+    if (entry.count != 0 &&
+            entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+        ALOGV("%s: Camera %d: Not forwarding partial result to listeners",
+                __FUNCTION__, device->getId());
+        return OK;
+    }
+
     entry = frame.find(ANDROID_REQUEST_ID);
     if (entry.count == 0) {
         ALOGE("%s: Camera %d: Error reading frame id",
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6f2dc85..cb72e0e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -41,6 +41,7 @@
 #include <utils/Trace.h>
 #include <utils/Timers.h>
 
+#include "utils/CameraTraces.h"
 #include "device3/Camera3Device.h"
 #include "device3/Camera3OutputStream.h"
 #include "device3/Camera3InputStream.h"
@@ -54,6 +55,7 @@
         mId(id),
         mHal3Device(NULL),
         mStatus(STATUS_UNINITIALIZED),
+        mUsePartialResultQuirk(false),
         mNextResultFrameNumber(0),
         mNextShutterFrameNumber(0),
         mListener(NULL)
@@ -192,6 +194,15 @@
     mNeedConfig = true;
     mPauseStateNotify = false;
 
+    /** Check for quirks */
+
+    // Will the HAL be sending in early partial result metadata?
+    camera_metadata_entry partialResultsQuirk =
+            mDeviceInfo.find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
+    if (partialResultsQuirk.count > 0 && partialResultsQuirk.data.u8[0] == 1) {
+        mUsePartialResultQuirk = true;
+    }
+
     return OK;
 }
 
@@ -1363,6 +1374,10 @@
     // But only do error state transition steps for the first error
     if (mStatus == STATUS_ERROR || mStatus == STATUS_UNINITIALIZED) return;
 
+    // Save the stack trace; view it later via dumpsys.
+    CameraTraces::saveTrace();
+    // TODO: consider adding errorCause and client pid/procname
+
     mErrorCause = errorCause;
 
     mRequestThread->setPaused(true);
@@ -1386,6 +1401,168 @@
 }
 
 /**
+ * QUIRK(partial results)
+ * Check if all 3A fields are ready, and send off a partial 3A-only result
+ * to the output frame queue
+ */
+bool Camera3Device::processPartial3AQuirk(int32_t frameNumber,
+        const CameraMetadata& partial) {
+
+    // Check if all 3A states are present
+    // The full list of fields is
+    //   android.control.afMode
+    //   android.control.awbMode
+    //   android.control.aeState
+    //   android.control.awbState
+    //   android.control.afState
+    //   android.control.afTriggerId
+    //   android.control.aePrecaptureId
+    // TODO: Add android.control.aeMode
+
+    bool gotAllStates = true;
+
+    uint8_t afMode;
+    uint8_t awbMode;
+    uint8_t aeState;
+    uint8_t afState;
+    uint8_t awbState;
+    int32_t afTriggerId;
+    int32_t aeTriggerId;
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_MODE,
+        &afMode, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_MODE,
+        &awbMode, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AE_STATE,
+        &aeState, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_STATE,
+        &afState, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_STATE,
+        &awbState, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_TRIGGER_ID,
+        &afTriggerId, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+        &aeTriggerId, frameNumber);
+
+    if (!gotAllStates) return false;
+
+    ALOGVV("%s: Camera %d: Frame %d: AF mode %d, AWB mode %d, "
+        "AF state %d, AE state %d, AWB state %d, "
+        "AF trigger %d, AE precapture trigger %d",
+        __FUNCTION__, mId, frameNumber,
+        afMode, awbMode,
+        afState, aeState, awbState,
+        afTriggerId, aeTriggerId);
+
+    // Got all states, so construct a minimal result to send
+    // In addition to the above fields, this means adding in
+    //   android.request.frameCount
+    //   android.quirks.partialResult
+
+    const size_t kMinimal3AResultEntries = 9; // 7 3A fields + frameCount + partialResult
+
+    Mutex::Autolock l(mOutputLock);
+
+    CameraMetadata& min3AResult =
+            *mResultQueue.insert(
+                mResultQueue.end(),
+                CameraMetadata(kMinimal3AResultEntries, /*dataCapacity*/ 0));
+
+    if (!insert3AResult(min3AResult, ANDROID_REQUEST_FRAME_COUNT,
+            &frameNumber, frameNumber)) {
+        return false;
+    }
+
+    static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
+    if (!insert3AResult(min3AResult, ANDROID_QUIRKS_PARTIAL_RESULT,
+            &partialResult, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_MODE,
+            &afMode, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AWB_MODE,
+            &awbMode, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AE_STATE,
+            &aeState, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_STATE,
+            &afState, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AWB_STATE,
+            &awbState, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_TRIGGER_ID,
+            &afTriggerId, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+            &aeTriggerId, frameNumber)) {
+        return false;
+    }
+
+    mResultSignal.signal();
+
+    return true;
+}
+
+template<typename T>
+bool Camera3Device::get3AResult(const CameraMetadata& result, int32_t tag,
+        T* value, int32_t frameNumber) {
+    (void) frameNumber; // only used by ALOGVV, which may compile to nothing
+
+    camera_metadata_ro_entry_t entry;
+
+    entry = result.find(tag);
+    if (entry.count == 0) {
+        ALOGVV("%s: Camera %d: Frame %d: No %s provided by HAL!", __FUNCTION__,
+            mId, frameNumber, get_camera_metadata_tag_name(tag));
+        return false;
+    }
+
+    if (sizeof(T) == sizeof(uint8_t)) {
+        *value = entry.data.u8[0];
+    } else if (sizeof(T) == sizeof(int32_t)) {
+        *value = entry.data.i32[0];
+    } else {
+        ALOGE("%s: Unexpected type", __FUNCTION__);
+        return false;
+    }
+    return true;
+}
+
+template<typename T>
+bool Camera3Device::insert3AResult(CameraMetadata& result, int32_t tag,
+        const T* value, int32_t frameNumber) {
+    if (result.update(tag, value, 1) != NO_ERROR) {
+        mResultQueue.erase(--mResultQueue.end(), mResultQueue.end());
+        SET_ERR("Frame %d: Failed to set %s in partial metadata",
+                frameNumber, get_camera_metadata_tag_name(tag));
+        return false;
+    }
+    return true;
+}
+
+/**
  * Camera HAL device callback methods
  */
 
@@ -1400,6 +1577,8 @@
                 frameNumber);
         return;
     }
+    bool partialResultQuirk = false;
+    CameraMetadata collectedQuirkResult;
 
     // Get capture timestamp from list of in-flight requests, where it was added
     // by the shutter notification for this frame. Then update the in-flight
@@ -1415,24 +1594,57 @@
             return;
         }
         InFlightRequest &request = mInFlightMap.editValueAt(idx);
+
+        // Check if this result carries only partial metadata
+        if (mUsePartialResultQuirk && result->result != NULL) {
+            camera_metadata_ro_entry_t partialResultEntry;
+            res = find_camera_metadata_ro_entry(result->result,
+                    ANDROID_QUIRKS_PARTIAL_RESULT, &partialResultEntry);
+            if (res != NAME_NOT_FOUND &&
+                    partialResultEntry.count > 0 &&
+                    partialResultEntry.data.u8[0] ==
+                    ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+                // A partial result. Flag this as such, and collect this
+                // set of metadata into the in-flight entry.
+                partialResultQuirk = true;
+                request.partialResultQuirk.collectedResult.append(
+                    result->result);
+                request.partialResultQuirk.collectedResult.erase(
+                    ANDROID_QUIRKS_PARTIAL_RESULT);
+                // Fire off a 3A-only result if possible
+                if (!request.partialResultQuirk.haveSent3A) {
+                    request.partialResultQuirk.haveSent3A =
+                            processPartial3AQuirk(frameNumber,
+                                request.partialResultQuirk.collectedResult);
+                }
+            }
+        }
+
         timestamp = request.captureTimestamp;
         /**
-         * One of the following must happen before it's legal to call process_capture_result:
+         * One of the following must happen before it's legal to call process_capture_result,
+         * unless partial metadata is being provided:
          * - CAMERA3_MSG_SHUTTER (expected during normal operation)
          * - CAMERA3_MSG_ERROR (expected during flush)
          */
-        if (request.requestStatus == OK && timestamp == 0) {
+        if (request.requestStatus == OK && timestamp == 0 && !partialResultQuirk) {
             SET_ERR("Called before shutter notify for frame %d",
                     frameNumber);
             return;
         }
 
-        if (result->result != NULL) {
+        // Did we get the (final) result metadata for this capture?
+        if (result->result != NULL && !partialResultQuirk) {
             if (request.haveResultMetadata) {
                 SET_ERR("Called multiple times with metadata for frame %d",
                         frameNumber);
                 return;
             }
+            if (mUsePartialResultQuirk &&
+                    !request.partialResultQuirk.collectedResult.isEmpty()) {
+                collectedQuirkResult.acquire(
+                    request.partialResultQuirk.collectedResult);
+            }
             request.haveResultMetadata = true;
         }
 
@@ -1444,6 +1656,7 @@
             return;
         }
 
+        // Check if everything has arrived for this result (buffers and metadata)
         if (request.haveResultMetadata && request.numBuffersLeft == 0) {
             ATRACE_ASYNC_END("frame capture", frameNumber);
             mInFlightMap.removeItemsAt(idx, 1);
@@ -1458,9 +1671,12 @@
     }
 
     // Process the result metadata, if provided
-    if (result->result != NULL) {
+    bool gotResult = false;
+    if (result->result != NULL && !partialResultQuirk) {
         Mutex::Autolock l(mOutputLock);
 
+        gotResult = true;
+
         if (frameNumber != mNextResultFrameNumber) {
             SET_ERR("Out-of-order capture result metadata submitted! "
                     "(got frame number %d, expecting %d)",
@@ -1469,19 +1685,26 @@
         }
         mNextResultFrameNumber++;
 
-        CameraMetadata &captureResult =
-                *mResultQueue.insert(mResultQueue.end(), CameraMetadata());
-
+        CameraMetadata captureResult;
         captureResult = result->result;
+
         if (captureResult.update(ANDROID_REQUEST_FRAME_COUNT,
                         (int32_t*)&frameNumber, 1) != OK) {
             SET_ERR("Failed to set frame# in metadata (%d)",
                     frameNumber);
+            gotResult = false;
         } else {
             ALOGVV("%s: Camera %d: Set frame# in metadata (%d)",
                     __FUNCTION__, mId, frameNumber);
         }
 
+        // Append any previous partials to form a complete result
+        if (mUsePartialResultQuirk && !collectedQuirkResult.isEmpty()) {
+            captureResult.append(collectedQuirkResult);
+        }
+
+        captureResult.sort();
+
         // Check that there's a timestamp in the result metadata
 
         camera_metadata_entry entry =
@@ -1489,10 +1712,19 @@
         if (entry.count == 0) {
             SET_ERR("No timestamp provided by HAL for frame %d!",
                     frameNumber);
+            gotResult = false;
         } else if (timestamp != entry.data.i64[0]) {
             SET_ERR("Timestamp mismatch between shutter notify and result"
                     " metadata for frame %d (%lld vs %lld respectively)",
                     frameNumber, timestamp, entry.data.i64[0]);
+            gotResult = false;
+        }
+
+        if (gotResult) {
+            // Valid result, insert into queue
+            CameraMetadata& queuedResult =
+                *mResultQueue.insert(mResultQueue.end(), CameraMetadata());
+            queuedResult.swap(captureResult);
         }
     } // scope for mOutputLock
 
@@ -1512,7 +1744,7 @@
 
     // Finally, signal any waiters for new frames
 
-    if (result->result != NULL) {
+    if (gotResult) {
         mResultSignal.signal();
     }
 
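Taken together, once the final (non-partial) metadata arrives, the assembly step above reduces to a short merge. A simplified restatement of that code path, with locking and error handling elided:

    CameraMetadata captureResult;
    captureResult = result->result;                  // copy final HAL metadata
    captureResult.update(ANDROID_REQUEST_FRAME_COUNT,
            (int32_t*)&frameNumber, 1);              // stamp the frame number
    if (mUsePartialResultQuirk && !collectedQuirkResult.isEmpty()) {
        captureResult.append(collectedQuirkResult);  // fold in earlier partials
    }
    captureResult.sort();                            // restore sorted tag order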
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 12252c8..4a24a88 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -188,6 +188,9 @@
     // Need to hold on to stream references until configure completes.
     Vector<sp<camera3::Camera3StreamInterface> > mDeletedStreams;
 
+    // Whether quirk ANDROID_QUIRKS_USE_PARTIAL_RESULT is enabled
+    bool                       mUsePartialResultQuirk;
+
     /**** End scope for mLock ****/
 
     class CaptureRequest : public LightRefBase<CaptureRequest> {
@@ -445,6 +448,18 @@
         // buffers
         int     numBuffersLeft;
 
+        // Fields used by the partial result quirk only
+        struct PartialResultQuirkInFlight {
+            // Set by process_capture_result once 3A has been sent to clients
+            bool    haveSent3A;
+            // Result metadata collected so far, when partial results are in use
+            CameraMetadata collectedResult;
+
+            PartialResultQuirkInFlight():
+                    haveSent3A(false) {
+            }
+        } partialResultQuirk;
+
         // Default constructor needed by KeyedVector
         InFlightRequest() :
                 requestId(0),
@@ -472,6 +487,21 @@
             int32_t numBuffers);
 
     /**
+     * For the partial result quirk, check if all 3A state fields are available
+     * and if so, queue up a 3A-only result for the client. Returns true if
+     * the 3A result was sent.
+     */
+    bool processPartial3AQuirk(int32_t frameNumber, const CameraMetadata& partial);
+
+    // Helpers for reading and writing 3A metadata to/from partial results
+    template<typename T>
+    bool get3AResult(const CameraMetadata& result, int32_t tag,
+            T* value, int32_t frameNumber);
+
+    template<typename T>
+    bool insert3AResult(CameraMetadata &result, int32_t tag, const T* value,
+            int32_t frameNumber);
+    /**
      * Tracking for idle detection
      */
     sp<camera3::StatusTracker> mStatusTracker;
diff --git a/services/camera/libcameraservice/utils/CameraTraces.cpp b/services/camera/libcameraservice/utils/CameraTraces.cpp
new file mode 100644
index 0000000..346e15f
--- /dev/null
+++ b/services/camera/libcameraservice/utils/CameraTraces.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraTraces"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include "utils/CameraTraces.h"
+#include <utils/ProcessCallStack.h>
+
+#include <utils/Mutex.h>
+#include <utils/List.h>
+
+#include <utils/Log.h>
+#include <cutils/trace.h>
+
+namespace android {
+namespace camera3 {
+
+struct CameraTracesImpl {
+    Mutex                    tracesLock;
+    List<ProcessCallStack>   pcsList;
+}; // struct CameraTracesImpl
+
+static CameraTracesImpl gImpl;
+CameraTracesImpl& CameraTraces::sImpl = gImpl;
+
+void CameraTraces::saveTrace() {
+    ALOGV("%s: begin", __FUNCTION__);
+    ATRACE_BEGIN("CameraTraces::saveTrace");
+    Mutex::Autolock al(sImpl.tracesLock);
+
+    List<ProcessCallStack>& pcsList = sImpl.pcsList;
+
+    // Insert new ProcessCallStack, and immediately crawl all the threads
+    pcsList.push_front(ProcessCallStack());
+    ProcessCallStack& pcs = *pcsList.begin();
+    pcs.update();
+
+    if (pcsList.size() > MAX_TRACES) {
+        // Bound the list size by discarding the oldest entry
+        pcsList.erase(--pcsList.end());
+    }
+
+    IF_ALOGV() {
+        pcs.log(LOG_TAG, ANDROID_LOG_VERBOSE);
+    }
+
+    ALOGD("Process trace saved. Use dumpsys media.camera to view.");
+
+    ATRACE_END();
+}
+
+status_t CameraTraces::dump(int fd, const Vector<String16> &args __attribute__((unused))) {
+    ALOGV("%s: fd = %d", __FUNCTION__, fd);
+    Mutex::Autolock al(sImpl.tracesLock);
+    List<ProcessCallStack>& pcsList = sImpl.pcsList;
+
+    if (fd < 0) {
+        ALOGW("%s: Negative FD (%d)", __FUNCTION__, fd);
+        return BAD_VALUE;
+    }
+
+    fdprintf(fd, "Camera traces (%zu):\n", pcsList.size());
+
+    if (pcsList.empty()) {
+        fdprintf(fd, "  No camera traces collected.\n");
+    }
+
+    // Print newest items first
+    List<ProcessCallStack>::iterator it, end;
+    for (it = pcsList.begin(), end = pcsList.end(); it != end; ++it) {
+        const ProcessCallStack& pcs = *it;
+        pcs.dump(fd, DUMP_INDENT);
+    }
+
+    return OK;
+}
+
+}; // namespace camera3
+}; // namespace android
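Both entry points are static, so call sites stay one-liners. A sketch of the intended hookups (assumed call sites; the dumpsys wiring lives outside this file):

    // At a failure point, as the Camera3Device error path above does:
    camera3::CameraTraces::saveTrace();

    // From the camera service's dump handler:
    camera3::CameraTraces::dump(fd, args);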
diff --git a/services/camera/libcameraservice/utils/CameraTraces.h b/services/camera/libcameraservice/utils/CameraTraces.h
new file mode 100644
index 0000000..d10dbc9
--- /dev/null
+++ b/services/camera/libcameraservice/utils/CameraTraces.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_TRACES_H_
+#define ANDROID_SERVERS_CAMERA_TRACES_H_
+
+#include <utils/Errors.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+
+namespace android {
+namespace camera3 {
+
+class CameraTracesImpl;
+
+// Collect a list of the process's stack traces
+class CameraTraces {
+public:
+    /**
+     * Save the current stack trace for each thread in the process. At most
+     * MAX_TRACES will be saved, after which the oldest traces will be discarded.
+     *
+     * <p>Use CameraTraces::dump to print out the traces.</p>
+     */
+    static void     saveTrace();
+
+    /**
+     * Prints all saved traces to the specified file descriptor.
+     *
+     * <p>Each line is indented by DUMP_INDENT spaces.</p>
+     */
+    static status_t dump(int fd, const Vector<String16>& args);
+
+private:
+    enum {
+        // Don't collect more than 100 traces. Discard oldest.
+        MAX_TRACES = 100,
+
+        // Insert 2 spaces when dumping the traces
+        DUMP_INDENT = 2,
+    };
+
+    CameraTraces();
+    ~CameraTraces();
+    CameraTraces(CameraTraces& rhs); // not copyable
+
+    static CameraTracesImpl& sImpl;
+}; // class CameraTraces
+
+}; // namespace camera3
+}; // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA_TRACES_H_