Merge "Revert "Revert "disable AwesomePlayer for Ogg vorbis""" into lmp-dev
diff --git a/camera/CameraUtils.cpp b/camera/CameraUtils.cpp
index 1ff63ab..04244ac 100644
--- a/camera/CameraUtils.cpp
+++ b/camera/CameraUtils.cpp
@@ -73,23 +73,23 @@
                 return INVALID_OPERATION;
         }
     } else {
-        // Front camera needs to be horizontally flipped for
-        // mirror-like behavior.
-        // Note: Flips are applied before rotates.
+        // Front camera needs to be horizontally flipped for mirror-like behavior.
+        // Note: Flips are applied before rotates; using XOR here as some of these flags are
+        // composed in terms of other flip/rotation flags, and are not bitwise-ORable.
         switch (orientation) {
             case 0:
                 flags = NATIVE_WINDOW_TRANSFORM_FLIP_H;
                 break;
             case 90:
-                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H |
+                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H ^
                         NATIVE_WINDOW_TRANSFORM_ROT_270;
                 break;
             case 180:
-                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H |
+                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H ^
                         NATIVE_WINDOW_TRANSFORM_ROT_180;
                 break;
             case 270:
-                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H |
+                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H ^
                         NATIVE_WINDOW_TRANSFORM_ROT_90;
 
                 break;
diff --git a/include/camera/camera2/ICameraDeviceCallbacks.h b/include/camera/camera2/ICameraDeviceCallbacks.h
index f059b3d..670480b 100644
--- a/include/camera/camera2/ICameraDeviceCallbacks.h
+++ b/include/camera/camera2/ICameraDeviceCallbacks.h
@@ -42,9 +42,13 @@
      * Error codes for CAMERA_MSG_ERROR
      */
     enum CameraErrorCode {
+        ERROR_CAMERA_INVALID_ERROR = -1, // To indicate all invalid error codes
         ERROR_CAMERA_DISCONNECTED = 0,
         ERROR_CAMERA_DEVICE = 1,
-        ERROR_CAMERA_SERVICE = 2
+        ERROR_CAMERA_SERVICE = 2,
+        ERROR_CAMERA_REQUEST = 3,
+        ERROR_CAMERA_RESULT = 4,
+        ERROR_CAMERA_BUFFER = 5,
     };
 
     // One way
diff --git a/media/libmedia/CharacterEncodingDetector.h b/include/media/CharacterEncodingDetector.h
similarity index 96%
rename from media/libmedia/CharacterEncodingDetector.h
rename to include/media/CharacterEncodingDetector.h
index 7b5ed86..deaa377 100644
--- a/media/libmedia/CharacterEncodingDetector.h
+++ b/include/media/CharacterEncodingDetector.h
@@ -43,7 +43,7 @@
         const UCharsetMatch *getPreferred(
                 const char *input, size_t len,
                 const UCharsetMatch** ucma, size_t matches,
-                bool *goodmatch);
+                bool *goodmatch, int *highestmatch);
 
         bool isFrequent(const uint16_t *values, uint32_t c);
 
diff --git a/media/libmedia/StringArray.h b/include/media/StringArray.h
similarity index 100%
rename from media/libmedia/StringArray.h
rename to include/media/StringArray.h
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 2442219..9cc208e 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -278,6 +278,7 @@
     bool                        mPrepareSync;
     status_t                    mPrepareStatus;
     audio_stream_type_t         mStreamType;
+    Parcel*                     mAudioAttributesParcel;
     bool                        mLoop;
     float                       mLeftVolume;
     float                       mRightVolume;
diff --git a/include/media/mediascanner.h b/include/media/mediascanner.h
index 5213bdc..d555279 100644
--- a/include/media/mediascanner.h
+++ b/include/media/mediascanner.h
@@ -122,7 +122,6 @@
 protected:
     // default encoding from MediaScanner::mLocale
     String8 mLocale;
-    CharacterEncodingDetector *mEncodingDetector;
 };
 
 }; // namespace android
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index eb31c77..da4c20c 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -234,7 +234,7 @@
     status_t setComponentRole(bool isEncoder, const char *mime);
     status_t configureCodec(const char *mime, const sp<AMessage> &msg);
 
-    status_t configureTunneledVideoPlayback(int64_t audioHwSync,
+    status_t configureTunneledVideoPlayback(int32_t audioHwSync,
             const sp<ANativeWindow> &nativeWindow);
 
     status_t setVideoPortFormatType(
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 3fb9e36..8000e84 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -48,7 +48,7 @@
             const sp<IMediaHTTPService> &httpService,
             const char *uri,
             const KeyedVector<String8, String8> *headers = NULL,
-            AString *sniffedMIME = NULL);
+            String8 *contentType = NULL);
 
     DataSource() {}
 
@@ -102,10 +102,6 @@
     virtual ~DataSource() {}
 
 private:
-    enum {
-        kDefaultMetaSize = 200000,
-    };
-
     static Mutex gSnifferMutex;
     static List<SnifferFunc> gSniffers;
     static bool gSniffersRegistered;
diff --git a/include/media/stagefright/foundation/ALooperRoster.h b/include/media/stagefright/foundation/ALooperRoster.h
index 940fc55..4d76b64 100644
--- a/include/media/stagefright/foundation/ALooperRoster.h
+++ b/include/media/stagefright/foundation/ALooperRoster.h
@@ -56,8 +56,6 @@
 
     KeyedVector<uint32_t, sp<AMessage> > mReplies;
 
-    status_t postMessage_l(const sp<AMessage> &msg, int64_t delayUs);
-
     DISALLOW_EVIL_CONSTRUCTORS(ALooperRoster);
 };
 
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 3be0651..e012116 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -70,15 +70,16 @@
 
 LOCAL_STATIC_LIBRARIES += libinstantssq
 
-LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper
+LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
 
 LOCAL_MODULE:= libmedia
 
 LOCAL_C_INCLUDES := \
     $(TOP)/frameworks/native/include/media/openmax \
+    $(TOP)/frameworks/av/include/media/ \
     $(TOP)/frameworks/av/media/libstagefright \
-    external/icu/icu4c/source/common \
-    external/icu/icu4c/source/i18n \
+    $(TOP)/external/icu/icu4c/source/common \
+    $(TOP)/external/icu/icu4c/source/i18n \
     $(call include-path-for, audio-effects) \
     $(call include-path-for, audio-utils)
 
diff --git a/media/libmedia/CharacterEncodingDetector.cpp b/media/libmedia/CharacterEncodingDetector.cpp
index 7d1ddfd..41994dc 100644
--- a/media/libmedia/CharacterEncodingDetector.cpp
+++ b/media/libmedia/CharacterEncodingDetector.cpp
@@ -18,7 +18,7 @@
 #define LOG_TAG "CharacterEncodingDector"
 #include <utils/Log.h>
 
-#include "CharacterEncodingDetector.h"
+#include <CharacterEncodingDetector.h>
 #include "CharacterEncodingDetectorTables.h"
 
 #include "utils/Vector.h"
@@ -118,10 +118,12 @@
             int32_t matches;
             const UCharsetMatch** ucma = ucsdet_detectAll(csd, &matches, &status);
             bool goodmatch = true;
+            int highest = 0;
             const UCharsetMatch* bestCombinedMatch = getPreferred(buf, strlen(buf),
-                    ucma, matches, &goodmatch);
+                    ucma, matches, &goodmatch, &highest);
 
-            if (!goodmatch && strlen(buf) < 20) {
+            ALOGV("goodmatch: %s, highest: %d", goodmatch ? "true" : "false", highest);
+            if (!goodmatch && (highest < 15 || strlen(buf) < 20)) {
                 ALOGV("not a good match, trying with more data");
                 // This string might be too short for ICU to do anything useful with.
                 // (real world example: "Björk" in ISO-8859-1 might be detected as GB18030, because
@@ -146,9 +148,10 @@
                     ucsdet_setText(csd, buf, strlen(buf), &status);
                     ucma = ucsdet_detectAll(csd, &matches, &status);
                     bestCombinedMatch = getPreferred(buf, strlen(buf),
-                            ucma, matches, &goodmatch);
-                    if (!goodmatch) {
+                            ucma, matches, &goodmatch, &highest);
+                    if (!goodmatch && highest <= 15) {
                         ALOGV("still not a good match after adding printable tags");
+                        bestCombinedMatch = NULL;
                     }
                 } else {
                     ALOGV("no printable tags to add");
@@ -157,6 +160,8 @@
 
             if (bestCombinedMatch != NULL) {
                 combinedenc = ucsdet_getName(bestCombinedMatch, &status);
+            } else {
+                combinedenc = "ISO-8859-1";
             }
         }
 
@@ -199,10 +204,17 @@
             if (strcmp(enc,"UTF-8") != 0) {
                 // only convert if the source encoding isn't already UTF-8
                 ALOGV("@@@ using converter %s for %s", enc, mNames.getEntry(i));
+                status = U_ZERO_ERROR;
                 UConverter *conv = ucnv_open(enc, &status);
                 if (U_FAILURE(status)) {
-                    ALOGE("could not create UConverter for %s", enc);
-                    continue;
+                    ALOGW("could not create UConverter for %s (%d), falling back to ISO-8859-1",
+                            enc, status);
+                    status = U_ZERO_ERROR;
+                    conv = ucnv_open("ISO-8859-1", &status);
+                    if (U_FAILURE(status)) {
+                        ALOGW("could not create UConverter for ISO-8859-1 either");
+                        continue;
+                    }
                 }
 
                 // convert from native encoding to UTF-8
@@ -224,7 +236,16 @@
                 } else {
                     // zero terminate
                     *target = 0;
-                    mValues.setEntry(i, buffer);
+                    // strip trailing spaces
+                    while (--target > buffer && *target == ' ') {
+                        *target = 0;
+                    }
+                    // skip leading spaces
+                    char *start = buffer;
+                    while (*start == ' ') {
+                        start++;
+                    }
+                    mValues.setEntry(i, start);
                 }
 
                 delete[] buffer;
@@ -261,7 +282,7 @@
 const UCharsetMatch *CharacterEncodingDetector::getPreferred(
         const char *input, size_t len,
         const UCharsetMatch** ucma, size_t nummatches,
-        bool *goodmatch) {
+        bool *goodmatch, int *highestmatch) {
 
     *goodmatch = false;
     Vector<const UCharsetMatch*> matches;
@@ -316,11 +337,17 @@
         }
 
         ALOGV("%zu: %s %d", i, encname, confidence);
+        status = U_ZERO_ERROR;
         UConverter *conv = ucnv_open(encname, &status);
+        int demerit = 0;
+        if (U_FAILURE(status)) {
+            ALOGV("failed to open %s: %d", encname, status);
+            confidence = 0;
+            demerit += 1000;
+        }
         const char *source = input;
         const char *sourceLimit = input + len;
         status = U_ZERO_ERROR;
-        int demerit = 0;
         int frequentchars = 0;
         int totalchars = 0;
         while (true) {
@@ -337,7 +364,8 @@
             if (c < 0x20 || (c >= 0x7f && c <= 0x009f)) {
                 ALOGV("control character %x", c);
                 demerit += 100;
-            } else if ((c >= 0xa0 && c <= 0xbe)         // symbols, superscripts
+            } else if ((c == 0xa0)                      // no-break space
+                    || (c >= 0xa2 && c <= 0xbe)         // symbols, superscripts
                     || (c == 0xd7) || (c == 0xf7)       // multiplication and division signs
                     || (c >= 0x2000 && c <= 0x209f)) {  // punctuation, superscripts
                 ALOGV("unlikely character %x", c);
@@ -408,10 +436,14 @@
     } else {
         ALOGV("runner up: '%s' w/ %d confidence",
                 ucsdet_getName(matches[runnerupidx], &status), runnerup);
+        if (runnerup < 0) {
+            runnerup = 0;
+        }
         if ((highest - runnerup) > 15) {
             *goodmatch = true;
         }
     }
+    *highestmatch = highest;
     return matches[highestidx];
 }
 
diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp
index 1661f04..9f803cb 100644
--- a/media/libmedia/MediaScannerClient.cpp
+++ b/media/libmedia/MediaScannerClient.cpp
@@ -25,14 +25,10 @@
 
 namespace android {
 
-MediaScannerClient::MediaScannerClient()
-    :   mEncodingDetector(NULL)
-{
+MediaScannerClient::MediaScannerClient() {
 }
 
-MediaScannerClient::~MediaScannerClient()
-{
-    delete mEncodingDetector;
+MediaScannerClient::~MediaScannerClient() {
 }
 
 void MediaScannerClient::setLocale(const char* locale)
@@ -40,31 +36,16 @@
     mLocale = locale; // not currently used
 }
 
-void MediaScannerClient::beginFile()
-{
-    delete mEncodingDetector;
-    mEncodingDetector = new CharacterEncodingDetector();
+void MediaScannerClient::beginFile() {
 }
 
 status_t MediaScannerClient::addStringTag(const char* name, const char* value)
 {
-    mEncodingDetector->addTag(name, value);
+    handleStringTag(name, value);
     return OK;
 }
 
-void MediaScannerClient::endFile()
-{
-    mEncodingDetector->detectAndConvert();
-
-    int size = mEncodingDetector->size();
-    if (size) {
-        for (int i = 0; i < size; i++) {
-            const char *name;
-            const char *value;
-            mEncodingDetector->getTag(i, &name, &value);
-            handleStringTag(name, value);
-        }
-    }
+void MediaScannerClient::endFile() {
 }
 
 }  // namespace android
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 6cd377a..9611ac7 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -50,6 +50,7 @@
     mListener = NULL;
     mCookie = NULL;
     mStreamType = AUDIO_STREAM_MUSIC;
+    mAudioAttributesParcel = NULL;
     mCurrentPosition = -1;
     mSeekPosition = -1;
     mCurrentState = MEDIA_PLAYER_IDLE;
@@ -68,6 +69,10 @@
 MediaPlayer::~MediaPlayer()
 {
     ALOGV("destructor");
+    if (mAudioAttributesParcel != NULL) {
+        delete mAudioAttributesParcel;
+        mAudioAttributesParcel = NULL;
+    }
     AudioSystem::releaseAudioSessionId(mAudioSessionId, -1);
     disconnect();
     IPCThreadState::self()->flushCommands();
@@ -237,6 +242,9 @@
 {
     if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
         mPlayer->setAudioStreamType(mStreamType);
+        if (mAudioAttributesParcel != NULL) {
+            mPlayer->setParameter(KEY_PARAMETER_AUDIO_ATTRIBUTES, *mAudioAttributesParcel);
+        }
         mCurrentState = MEDIA_PLAYER_PREPARING;
         return mPlayer->prepareAsync();
     }
@@ -662,8 +670,17 @@
     if (mPlayer != NULL) {
         return  mPlayer->setParameter(key, request);
     }
-    ALOGV("setParameter: no active player");
-    return INVALID_OPERATION;
+    switch (key) {
+    case KEY_PARAMETER_AUDIO_ATTRIBUTES:
+        // no player, save the marshalled audio attributes
+        if (mAudioAttributesParcel != NULL) { delete mAudioAttributesParcel; };
+        mAudioAttributesParcel = new Parcel();
+        mAudioAttributesParcel->appendFrom(&request, 0, request.dataSize());
+        return OK;
+    default:
+        ALOGV("setParameter: no active player");
+        return INVALID_OPERATION;
+    }
 }
 
 status_t MediaPlayer::getParameter(int key, Parcel *reply)
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 0c7e590c..adc066d 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -28,6 +28,7 @@
     libcamera_client            \
     libcrypto                   \
     libcutils                   \
+    libdrmframework             \
     liblog                      \
     libdl                       \
     libgui                      \
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 2c48306..c8cb7ed 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -204,6 +204,8 @@
 // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 // |                       content_type                            |
 // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                       source                                  |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 // |                       flags                                   |
 // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 // |                       kAudioAttributesMarshallTagFlattenTags  | // ignore tags if not found
@@ -219,6 +221,7 @@
 {
     attributes->usage = (audio_usage_t) parcel.readInt32();
     attributes->content_type = (audio_content_type_t) parcel.readInt32();
+    attributes->source = (audio_source_t) parcel.readInt32();
     attributes->flags = (audio_flags_mask_t) parcel.readInt32();
     const bool hasFlattenedTag = (parcel.readInt32() == kAudioAttributesMarshallTagFlattenTags);
     if (hasFlattenedTag) {
@@ -1798,7 +1801,9 @@
     //ALOGV("write(%p, %u)", buffer, size);
     if (mTrack != 0) {
         ssize_t ret = mTrack->write(buffer, size);
-        mBytesWritten += ret;
+        if (ret >= 0) {
+            mBytesWritten += ret;
+        }
         return ret;
     }
     return NO_INIT;
@@ -1945,7 +1950,7 @@
 #define LOG_TAG "AudioCache"
 MediaPlayerService::AudioCache::AudioCache(const sp<IMemoryHeap>& heap) :
     mHeap(heap), mChannelCount(0), mFrameCount(1024), mSampleRate(0), mSize(0),
-    mError(NO_ERROR),  mCommandComplete(false)
+    mFrameSize(1), mError(NO_ERROR),  mCommandComplete(false)
 {
 }
 
@@ -1962,14 +1967,14 @@
 status_t MediaPlayerService::AudioCache::getPosition(uint32_t *position) const
 {
     if (position == 0) return BAD_VALUE;
-    *position = mSize;
+    *position = mSize / mFrameSize;
     return NO_ERROR;
 }
 
 status_t MediaPlayerService::AudioCache::getFramesWritten(uint32_t *written) const
 {
     if (written == 0) return BAD_VALUE;
-    *written = mSize;
+    *written = mSize / mFrameSize;
     return NO_ERROR;
 }
 
@@ -2031,6 +2036,8 @@
 
     if (actualSize > 0) {
         sink->write(mBuffer, actualSize);
+        // Could return false on sink->write() error or short count.
+        // Not necessarily appropriate but would work for AudioCache behavior.
     }
 
     return true;
@@ -2053,6 +2060,9 @@
     mChannelCount = (uint16_t)channelCount;
     mFormat = format;
     mMsecsPerFrame = 1.e3 / (float) sampleRate;
+    mFrameSize =  audio_is_linear_pcm(mFormat)
+            ? mChannelCount * audio_bytes_per_sample(mFormat) : 1;
+    mFrameCount = mHeap->getSize() / mFrameSize;
 
     if (cb != NULL) {
         mCallbackThread = new CallbackThread(this, cb, cookie);
@@ -2082,12 +2092,26 @@
     if (p == NULL) return NO_INIT;
     p += mSize;
     ALOGV("memcpy(%p, %p, %u)", p, buffer, size);
-    if (mSize + size > mHeap->getSize()) {
+
+    bool overflow = mSize + size > mHeap->getSize();
+    if (overflow) {
         ALOGE("Heap size overflow! req size: %d, max size: %d", (mSize + size), mHeap->getSize());
         size = mHeap->getSize() - mSize;
     }
+    size -= size % mFrameSize; // consume only integral amounts of frame size
     memcpy(p, buffer, size);
     mSize += size;
+
+    if (overflow) {
+        // Signal heap filled here (last frame may be truncated).
+        // After this point, no more data should be written as the
+        // heap is filled and the AudioCache should be effectively
+        // immutable with respect to future writes.
+        //
+        // It is thus safe for another thread to read the AudioCache.
+        mCommandComplete = true;
+        mSignal.signal();
+    }
     return size;
 }
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 406e3f6..4fe7075 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -194,7 +194,7 @@
         virtual ssize_t         bufferSize() const { return frameSize() * mFrameCount; }
         virtual ssize_t         frameCount() const { return mFrameCount; }
         virtual ssize_t         channelCount() const { return (ssize_t)mChannelCount; }
-        virtual ssize_t         frameSize() const { return ssize_t(mChannelCount * ((mFormat == AUDIO_FORMAT_PCM_16_BIT)?sizeof(int16_t):sizeof(u_int8_t))); }
+        virtual ssize_t         frameSize() const { return (ssize_t)mFrameSize; }
         virtual uint32_t        latency() const;
         virtual float           msecsPerFrame() const;
         virtual status_t        getPosition(uint32_t *position) const;
@@ -244,6 +244,7 @@
         ssize_t             mFrameCount;
         uint32_t            mSampleRate;
         uint32_t            mSize;
+        size_t              mFrameSize;
         int                 mError;
         bool                mCommandComplete;
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 8774117..e2bcb1e 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -1433,6 +1433,10 @@
             format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
             break;
 
+        case VIDEO_ENCODER_VP8:
+            format->setString("mime", MEDIA_MIMETYPE_VIDEO_VP8);
+            break;
+
         default:
             CHECK(!"Should not be here, unsupported video encoding.");
             break;
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 76e1d54..8e1987a 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -32,6 +32,9 @@
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+#include "../../libstagefright/include/DRMExtractor.h"
+#include "../../libstagefright/include/NuCachedSource2.h"
 #include "../../libstagefright/include/WVMExtractor.h"
 
 namespace android {
@@ -47,18 +50,28 @@
       mAudioIsVorbis(false),
       mIsWidevine(false),
       mUIDValid(uidValid),
-      mUID(uid) {
+      mUID(uid),
+      mDrmManagerClient(NULL),
+      mMetaDataSize(-1ll),
+      mBitrate(-1ll),
+      mPollBufferingGeneration(0) {
     resetDataSource();
     DataSource::RegisterDefaultSniffers();
 }
 
 void NuPlayer::GenericSource::resetDataSource() {
+    mAudioTimeUs = 0;
+    mVideoTimeUs = 0;
     mHTTPService.clear();
     mUri.clear();
     mUriHeaders.clear();
     mFd = -1;
     mOffset = 0;
     mLength = 0;
+    setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
+    mDecryptHandle = NULL;
+    mDrmManagerClient = NULL;
+    mStarted = false;
 }
 
 status_t NuPlayer::GenericSource::setDataSource(
@@ -92,18 +105,18 @@
     return OK;
 }
 
-status_t NuPlayer::GenericSource::initFromDataSource(
-        const sp<DataSource> &dataSource,
-        const char* mime) {
+status_t NuPlayer::GenericSource::initFromDataSource() {
     sp<MediaExtractor> extractor;
 
+    CHECK(mDataSource != NULL);
+
     if (mIsWidevine) {
         String8 mimeType;
         float confidence;
         sp<AMessage> dummy;
         bool success;
 
-        success = SniffWVM(dataSource, &mimeType, &confidence, &dummy);
+        success = SniffWVM(mDataSource, &mimeType, &confidence, &dummy);
         if (!success
                 || strcasecmp(
                     mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
@@ -111,20 +124,25 @@
             return UNKNOWN_ERROR;
         }
 
-        sp<WVMExtractor> wvmExtractor = new WVMExtractor(dataSource);
-        wvmExtractor->setAdaptiveStreamingMode(true);
+        mWVMExtractor = new WVMExtractor(mDataSource);
+        mWVMExtractor->setAdaptiveStreamingMode(true);
         if (mUIDValid) {
-            wvmExtractor->setUID(mUID);
+            mWVMExtractor->setUID(mUID);
         }
-        extractor = wvmExtractor;
+        extractor = mWVMExtractor;
     } else {
-        extractor = MediaExtractor::Create(dataSource, mime);
+        extractor = MediaExtractor::Create(mDataSource,
+                mSniffedMIME.empty() ? NULL: mSniffedMIME.c_str());
     }
 
     if (extractor == NULL) {
         return UNKNOWN_ERROR;
     }
 
+    if (extractor->getDrmFlag()) {
+        checkDrmStatus(mDataSource);
+    }
+
     sp<MetaData> fileMeta = extractor->getMetaData();
     if (fileMeta != NULL) {
         int64_t duration;
@@ -133,14 +151,20 @@
         }
     }
 
+    int32_t totalBitrate = 0;
+
     for (size_t i = 0; i < extractor->countTracks(); ++i) {
+        sp<MediaSource> track = extractor->getTrack(i);
+
         sp<MetaData> meta = extractor->getTrackMetaData(i);
 
         const char *mime;
         CHECK(meta->findCString(kKeyMIMEType, &mime));
 
-        sp<MediaSource> track = extractor->getTrack(i);
-
+        // Do the string compare immediately with "mime",
+        // we can't assume "mime" would stay valid after another
+        // extractor operation, some extractors might modify meta
+        // during getTrack() and make it invalid.
         if (!strncasecmp(mime, "audio/", 6)) {
             if (mAudioTrack.mSource == NULL) {
                 mAudioTrack.mIndex = i;
@@ -177,12 +201,43 @@
                     mDurationUs = durationUs;
                 }
             }
+
+            int32_t bitrate;
+            if (totalBitrate >= 0 && meta->findInt32(kKeyBitRate, &bitrate)) {
+                totalBitrate += bitrate;
+            } else {
+                totalBitrate = -1;
+            }
         }
     }
 
+    mBitrate = totalBitrate;
+
     return OK;
 }
 
+void NuPlayer::GenericSource::checkDrmStatus(const sp<DataSource>& dataSource) {
+    dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
+    if (mDecryptHandle != NULL) {
+        CHECK(mDrmManagerClient);
+        if (RightsStatus::RIGHTS_VALID != mDecryptHandle->status) {
+            sp<AMessage> msg = dupNotify();
+            msg->setInt32("what", kWhatDrmNoLicense);
+            msg->post();
+        }
+    }
+}
+
+int64_t NuPlayer::GenericSource::getLastReadPosition() {
+    if (mAudioTrack.mSource != NULL) {
+        return mAudioTimeUs;
+    } else if (mVideoTrack.mSource != NULL) {
+        return mVideoTimeUs;
+    } else {
+        return 0;
+    }
+}
+
 status_t NuPlayer::GenericSource::setBuffers(
         bool audio, Vector<MediaBuffer *> &buffers) {
     if (mIsWidevine && !audio) {
@@ -213,39 +268,65 @@
 
 void NuPlayer::GenericSource::onPrepareAsync() {
     // delayed data source creation
-    AString sniffedMIME;
-    sp<DataSource> dataSource;
+    if (mDataSource == NULL) {
+        if (!mUri.empty()) {
+            mIsWidevine = !strncasecmp(mUri.c_str(), "widevine://", 11);
 
-    if (!mUri.empty()) {
-        mIsWidevine = !strncasecmp(mUri.c_str(), "widevine://", 11);
+            mDataSource = DataSource::CreateFromURI(
+                   mHTTPService, mUri.c_str(), &mUriHeaders, &mContentType);
+        } else {
+            // set to false first, if the extractor
+            // comes back as secure, set it to true then.
+            mIsWidevine = false;
 
-        dataSource = DataSource::CreateFromURI(
-               mHTTPService, mUri.c_str(), &mUriHeaders, &sniffedMIME);
-    } else {
-        // set to false first, if the extractor
-        // comes back as secure, set it to true then.
-        mIsWidevine = false;
+            mDataSource = new FileSource(mFd, mOffset, mLength);
+        }
 
-        dataSource = new FileSource(mFd, mOffset, mLength);
+        if (mDataSource == NULL) {
+            ALOGE("Failed to create data source!");
+            notifyPreparedAndCleanup(UNKNOWN_ERROR);
+            return;
+        }
+
+        if (mDataSource->flags() & DataSource::kIsCachingDataSource) {
+            mCachedSource = static_cast<NuCachedSource2 *>(mDataSource.get());
+        }
+
+        if (mIsWidevine || mCachedSource != NULL) {
+            schedulePollBuffering();
+        }
     }
 
-    if (dataSource == NULL) {
-        ALOGE("Failed to create data source!");
-        notifyPrepared(UNKNOWN_ERROR);
+    // check initial caching status
+    status_t err = prefillCacheIfNecessary();
+    if (err != OK) {
+        if (err == -EAGAIN) {
+            (new AMessage(kWhatPrepareAsync, id()))->post(200000);
+        } else {
+            ALOGE("Failed to prefill data cache!");
+            notifyPreparedAndCleanup(UNKNOWN_ERROR);
+        }
         return;
     }
 
-    status_t err = initFromDataSource(
-            dataSource, sniffedMIME.empty() ? NULL : sniffedMIME.c_str());
+    // init extractor from data source
+    err = initFromDataSource();
 
     if (err != OK) {
         ALOGE("Failed to init from data source!");
-        notifyPrepared(err);
+        notifyPreparedAndCleanup(err);
         return;
     }
 
     if (mVideoTrack.mSource != NULL) {
-        notifyVideoSizeChanged(getFormat(false /* audio */));
+        sp<MetaData> meta = doGetFormatMeta(false /* audio */);
+        sp<AMessage> msg = new AMessage;
+        err = convertMetaDataToMessage(meta, &msg);
+        if(err != OK) {
+            notifyPreparedAndCleanup(err);
+            return;
+        }
+        notifyVideoSizeChanged(msg);
     }
 
     notifyFlagsChanged(
@@ -258,6 +339,89 @@
     notifyPrepared();
 }
 
+void NuPlayer::GenericSource::notifyPreparedAndCleanup(status_t err) {
+    if (err != OK) {
+        mMetaDataSize = -1ll;
+        mContentType = "";
+        mSniffedMIME = "";
+        mDataSource.clear();
+        mCachedSource.clear();
+
+        cancelPollBuffering();
+    }
+    notifyPrepared(err);
+}
+
+status_t NuPlayer::GenericSource::prefillCacheIfNecessary() {
+    CHECK(mDataSource != NULL);
+
+    if (mCachedSource == NULL) {
+        // no prefill if the data source is not cached
+        return OK;
+    }
+
+    // We're not doing this for streams that appear to be audio-only
+    // streams to ensure that even low bandwidth streams start
+    // playing back fairly instantly.
+    if (!strncasecmp(mContentType.string(), "audio/", 6)) {
+        return OK;
+    }
+
+    // We're going to prefill the cache before trying to instantiate
+    // the extractor below, as the latter is an operation that otherwise
+    // could block on the datasource for a significant amount of time.
+    // During that time we'd be unable to abort the preparation phase
+    // without this prefill.
+
+    // Initially make sure we have at least 192 KB for the sniff
+    // to complete without blocking.
+    static const size_t kMinBytesForSniffing = 192 * 1024;
+    static const size_t kDefaultMetaSize = 200000;
+
+    status_t finalStatus;
+
+    size_t cachedDataRemaining =
+            mCachedSource->approxDataRemaining(&finalStatus);
+
+    if (finalStatus != OK || (mMetaDataSize >= 0
+            && (off64_t)cachedDataRemaining >= mMetaDataSize)) {
+        ALOGV("stop caching, status %d, "
+                "metaDataSize %lld, cachedDataRemaining %zu",
+                finalStatus, mMetaDataSize, cachedDataRemaining);
+        return OK;
+    }
+
+    ALOGV("now cached %zu bytes of data", cachedDataRemaining);
+
+    if (mMetaDataSize < 0
+            && cachedDataRemaining >= kMinBytesForSniffing) {
+        String8 tmp;
+        float confidence;
+        sp<AMessage> meta;
+        if (!mCachedSource->sniff(&tmp, &confidence, &meta)) {
+            return UNKNOWN_ERROR;
+        }
+
+        // We successfully identified the file's extractor to
+        // be, remember this mime type so we don't have to
+        // sniff it again when we call MediaExtractor::Create()
+        mSniffedMIME = tmp.string();
+
+        if (meta == NULL
+                || !meta->findInt64("meta-data-size",
+                        reinterpret_cast<int64_t*>(&mMetaDataSize))) {
+            mMetaDataSize = kDefaultMetaSize;
+        }
+
+        if (mMetaDataSize < 0ll) {
+            ALOGE("invalid metaDataSize = %lld bytes", mMetaDataSize);
+            return UNKNOWN_ERROR;
+        }
+    }
+
+    return -EAGAIN;
+}
+
 void NuPlayer::GenericSource::start() {
     ALOGI("start");
 
@@ -266,7 +430,7 @@
         mAudioTrack.mPackets =
             new AnotherPacketSource(mAudioTrack.mSource->getFormat());
 
-        readBuffer(MEDIA_TRACK_TYPE_AUDIO);
+        postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
     }
 
     if (mVideoTrack.mSource != NULL) {
@@ -274,14 +438,102 @@
         mVideoTrack.mPackets =
             new AnotherPacketSource(mVideoTrack.mSource->getFormat());
 
-        readBuffer(MEDIA_TRACK_TYPE_VIDEO);
+        postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
     }
+
+    setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
+    mStarted = true;
+}
+
+void NuPlayer::GenericSource::stop() {
+    // nothing to do, just account for DRM playback status
+    setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
+    mStarted = false;
+}
+
+void NuPlayer::GenericSource::pause() {
+    // nothing to do, just account for DRM playback status
+    setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
+    mStarted = false;
+}
+
+void NuPlayer::GenericSource::resume() {
+    // nothing to do, just account for DRM playback status
+    setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
+    mStarted = true;
+}
+
+void NuPlayer::GenericSource::setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position) {  // forwards DRM playback state changes (Playback::START/STOP/PAUSE) for protected content
+    if (mDecryptHandle != NULL) {
+        mDrmManagerClient->setPlaybackStatus(mDecryptHandle, playbackStatus, position);  // position in ms: every caller passes a Us value divided by 1000
+    }
+    mSubtitleTrack.mPackets = new AnotherPacketSource(NULL);  // NOTE(review): runs on EVERY start/stop/pause/seek and discards any queued subtitle buffers -- confirm intentional
+    mTimedTextTrack.mPackets = new AnotherPacketSource(NULL); // same caveat for timed text
+}
 
 status_t NuPlayer::GenericSource::feedMoreTSData() {
     return OK;
 }
 
+void NuPlayer::GenericSource::schedulePollBuffering() {
+    sp<AMessage> msg = new AMessage(kWhatPollBuffering, id());
+    msg->setInt32("generation", mPollBufferingGeneration);
+    msg->post(1000000ll);
+}
+
+void NuPlayer::GenericSource::cancelPollBuffering() {
+    ++mPollBufferingGeneration;
+}
+
+void NuPlayer::GenericSource::notifyBufferingUpdate(int percentage) {
+    sp<AMessage> msg = dupNotify();
+    msg->setInt32("what", kWhatBufferingUpdate);
+    msg->setInt32("percentage", percentage);
+    msg->post();
+}
+
+void NuPlayer::GenericSource::onPollBuffering() {  // 1s periodic poll posted by schedulePollBuffering()
+    status_t finalStatus = UNKNOWN_ERROR;  // remains UNKNOWN_ERROR if neither a cached source nor a WVM extractor exists
+    int64_t cachedDurationUs = 0ll;
+
+    if (mCachedSource != NULL) {
+        size_t cachedDataRemaining =
+                mCachedSource->approxDataRemaining(&finalStatus);
+
+        if (finalStatus == OK) {
+            off64_t size;
+            int64_t bitrate = 0ll;  // bits per second
+            if (mDurationUs > 0 && mCachedSource->getSize(&size) == OK) {
+                bitrate = size * 8000000ll / mDurationUs;  // bytes * 8 bits * 1e6 / duration-in-us = bits/sec
+            } else if (mBitrate > 0) {
+                bitrate = mBitrate;  // fall back to bitrate derived elsewhere (e.g. container metadata)
+            }
+            if (bitrate > 0) {
+                cachedDurationUs = cachedDataRemaining * 8000000ll / bitrate;  // bytes * 8e6 / (bits/sec) = microseconds of media buffered
+            }
+        }
+    } else if (mWVMExtractor != NULL) {
+        cachedDurationUs
+            = mWVMExtractor->getCachedDurationUs(&finalStatus);
+    }
+
+    if (finalStatus == ERROR_END_OF_STREAM) {  // everything cached: report 100% and stop polling
+        notifyBufferingUpdate(100);
+        cancelPollBuffering();
+        return;
+    } else if (cachedDurationUs > 0ll && mDurationUs > 0ll) {
+        int percentage = 100.0 * cachedDurationUs / mDurationUs;  // time-based percentage, clamped below
+        if (percentage > 100) {
+            percentage = 100;
+        }
+
+        notifyBufferingUpdate(percentage);
+    }
+
+    schedulePollBuffering();  // re-arm the 1s timer (skipped on the EOS path above)
+}
+
+
 void NuPlayer::GenericSource::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
       case kWhatPrepareAsync:
@@ -364,6 +616,45 @@
 
           break;
       }
+      case kWhatPollBuffering:
+      {
+          int32_t generation;
+          CHECK(msg->findInt32("generation", &generation));
+          if (generation == mPollBufferingGeneration) {
+              onPollBuffering();
+          }
+          break;
+      }
+
+      case kWhatGetFormat:
+      {
+          onGetFormatMeta(msg);
+          break;
+      }
+
+      case kWhatGetSelectedTrack:
+      {
+          onGetSelectedTrack(msg);
+          break;
+      }
+
+      case kWhatSelectTrack:
+      {
+          onSelectTrack(msg);
+          break;
+      }
+
+      case kWhatSeek:
+      {
+          onSeek(msg);
+          break;
+      }
+
+      case kWhatReadBuffer:
+      {
+          onReadBuffer(msg);
+          break;
+      }
 
       default:
           Source::onMessageReceived(msg);
@@ -440,6 +731,34 @@
 }
 
 sp<MetaData> NuPlayer::GenericSource::getFormatMeta(bool audio) {
+    sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
+    msg->setInt32("audio", audio);
+
+    sp<AMessage> response;
+    void *format;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findPointer("format", &format));
+        return (MetaData *)format;
+    } else {
+        return NULL;
+    }
+}
+
+void NuPlayer::GenericSource::onGetFormatMeta(sp<AMessage> msg) const {  // looper-thread half of the synchronous getFormatMeta() call
+    int32_t audio;
+    CHECK(msg->findInt32("audio", &audio));
+
+    sp<AMessage> response = new AMessage;
+    sp<MetaData> format = doGetFormatMeta(audio);
+    response->setPointer("format", format.get());  // NOTE(review): raw pointer in the reply holds no reference -- relies on the track source keeping this MetaData alive; confirm lifetime
+
+    uint32_t replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
+sp<MetaData> NuPlayer::GenericSource::doGetFormatMeta(bool audio) const {
     sp<MediaSource> source = audio ? mAudioTrack.mSource : mVideoTrack.mSource;
 
     if (source == NULL) {
@@ -459,7 +778,7 @@
 
     if (mIsWidevine && !audio) {
         // try to read a buffer as we may not have been able to the last time
-        readBuffer(MEDIA_TRACK_TYPE_VIDEO, -1ll);
+        postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
     }
 
     status_t finalResult;
@@ -470,18 +789,7 @@
     status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
 
     if (!track->mPackets->hasBufferAvailable(&finalResult)) {
-        readBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO, -1ll);
-    }
-
-    if (mSubtitleTrack.mSource == NULL && mTimedTextTrack.mSource == NULL) {
-        return result;
-    }
-
-    if (mSubtitleTrack.mSource != NULL) {
-        CHECK(mSubtitleTrack.mPackets != NULL);
-    }
-    if (mTimedTextTrack.mSource != NULL) {
-        CHECK(mTimedTextTrack.mPackets != NULL);
+        postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
     }
 
     if (result != OK) {
@@ -575,6 +883,35 @@
 }
 
 ssize_t NuPlayer::GenericSource::getSelectedTrack(media_track_type type) const {
+    sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, id());
+    msg->setInt32("type", type);
+
+    sp<AMessage> response;
+    int32_t index;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("index", &index));
+        return index;
+    } else {
+        return -1;
+    }
+}
+
+void NuPlayer::GenericSource::onGetSelectedTrack(sp<AMessage> msg) const {
+    int32_t tmpType;
+    CHECK(msg->findInt32("type", &tmpType));
+    media_track_type type = (media_track_type)tmpType;
+
+    sp<AMessage> response = new AMessage;
+    ssize_t index = doGetSelectedTrack(type);
+    response->setInt32("index", index);
+
+    uint32_t replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
+ssize_t NuPlayer::GenericSource::doGetSelectedTrack(media_track_type type) const {
     const Track *track = NULL;
     switch (type) {
     case MEDIA_TRACK_TYPE_VIDEO:
@@ -602,6 +939,34 @@
 
 status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select) {
     ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
+    sp<AMessage> msg = new AMessage(kWhatSelectTrack, id());
+    msg->setInt32("trackIndex", trackIndex);
+    msg->setInt32("select", select);  // FIX: was setInt32("select", trackIndex) -- onSelectTrack() then saw select = (trackIndex != 0), so nonzero tracks could never be deselected and track 0 could never be selected
+
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));  // propagate the status doSelectTrack() placed in the reply
+    }
+
+    return err;
+}
+
+void NuPlayer::GenericSource::onSelectTrack(sp<AMessage> msg) {
+    int32_t trackIndex, select;
+    CHECK(msg->findInt32("trackIndex", &trackIndex));
+    CHECK(msg->findInt32("select", &select));
+
+    sp<AMessage> response = new AMessage;
+    status_t err = doSelectTrack(trackIndex, select);
+    response->setInt32("err", err);
+
+    uint32_t replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
+status_t NuPlayer::GenericSource::doSelectTrack(size_t trackIndex, bool select) {
     if (trackIndex >= mSources.size()) {
         return BAD_INDEX;
     }
@@ -672,6 +1037,32 @@
 }
 
 status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
+    sp<AMessage> msg = new AMessage(kWhatSeek, id());
+    msg->setInt64("seekTimeUs", seekTimeUs);
+
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+
+    return err;
+}
+
+void NuPlayer::GenericSource::onSeek(sp<AMessage> msg) {
+    int64_t seekTimeUs;
+    CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+
+    sp<AMessage> response = new AMessage;
+    status_t err = doSeek(seekTimeUs);
+    response->setInt32("err", err);
+
+    uint32_t replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
+status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs) {
     if (mVideoTrack.mSource != NULL) {
         int64_t actualTimeUs;
         readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, &actualTimeUs);
@@ -683,6 +1074,10 @@
         readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs);
     }
 
+    setDrmPlaybackStatusIfNeeded(Playback::START, seekTimeUs / 1000);
+    if (!mStarted) {
+        setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
+    }
     return OK;
 }
 
@@ -752,6 +1147,19 @@
     return ab;
 }
 
+void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) {
+    sp<AMessage> msg = new AMessage(kWhatReadBuffer, id());
+    msg->setInt32("trackType", trackType);
+    msg->post();
+}
+
+void NuPlayer::GenericSource::onReadBuffer(sp<AMessage> msg) {
+    int32_t tmpType;
+    CHECK(msg->findInt32("trackType", &tmpType));
+    media_track_type trackType = (media_track_type)tmpType;
+    readBuffer(trackType);
+}
+
 void NuPlayer::GenericSource::readBuffer(
         media_track_type trackType, int64_t seekTimeUs, int64_t *actualTimeUs, bool formatChange) {
     Track *track;
@@ -800,6 +1208,14 @@
         options.clearSeekTo();
 
         if (err == OK) {
+            int64_t timeUs;
+            CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
+            if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+                mAudioTimeUs = timeUs;
+            } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
+                mVideoTimeUs = timeUs;
+            }
+
             // formatChange && seeking: track whose source is changed during selection
             // formatChange && !seeking: track whose source is not changed during selection
             // !formatChange: normal seek
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index d3081de..50ff98a 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -27,12 +27,16 @@
 
 namespace android {
 
+class DecryptHandle;
+class DrmManagerClient;
 struct AnotherPacketSource;
 struct ARTSPController;
 struct DataSource;
 struct IMediaHTTPService;
 struct MediaSource;
 class MediaBuffer;
+struct NuCachedSource2;
+struct WVMExtractor;
 
 struct NuPlayer::GenericSource : public NuPlayer::Source {
     GenericSource(const sp<AMessage> &notify, bool uidValid, uid_t uid);
@@ -47,6 +51,9 @@
     virtual void prepareAsync();
 
     virtual void start();
+    virtual void stop();
+    virtual void pause();
+    virtual void resume();
 
     virtual status_t feedMoreTSData();
 
@@ -76,6 +83,12 @@
         kWhatSendSubtitleData,
         kWhatSendTimedTextData,
         kWhatChangeAVSource,
+        kWhatPollBuffering,
+        kWhatGetFormat,
+        kWhatGetSelectedTrack,
+        kWhatSelectTrack,
+        kWhatSeek,
+        kWhatReadBuffer,
     };
 
     Vector<sp<MediaSource> > mSources;
@@ -87,7 +100,9 @@
     };
 
     Track mAudioTrack;
+    int64_t mAudioTimeUs;
     Track mVideoTrack;
+    int64_t mVideoTimeUs;
     Track mSubtitleTrack;
     Track mTimedTextTrack;
 
@@ -105,14 +120,42 @@
     int64_t mOffset;
     int64_t mLength;
 
-    sp<ALooper> mLooper;
+    sp<DataSource> mDataSource;
+    sp<NuCachedSource2> mCachedSource;
+    sp<WVMExtractor> mWVMExtractor;
+    DrmManagerClient *mDrmManagerClient;
+    sp<DecryptHandle> mDecryptHandle;
+    bool mStarted;
+    String8 mContentType;
+    AString mSniffedMIME;
+    off64_t mMetaDataSize;
+    int64_t mBitrate;
+    int32_t mPollBufferingGeneration;
 
+    sp<ALooper> mLooper;
 
     void resetDataSource();
 
-    status_t initFromDataSource(
-            const sp<DataSource> &dataSource,
-            const char *mime);
+    status_t initFromDataSource();
+    void checkDrmStatus(const sp<DataSource>& dataSource);
+    int64_t getLastReadPosition();
+    void setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position);
+
+    status_t prefillCacheIfNecessary();
+
+    void notifyPreparedAndCleanup(status_t err);
+
+    void onGetFormatMeta(sp<AMessage> msg) const;
+    sp<MetaData> doGetFormatMeta(bool audio) const;
+
+    void onGetSelectedTrack(sp<AMessage> msg) const;
+    ssize_t doGetSelectedTrack(media_track_type type) const;
+
+    void onSelectTrack(sp<AMessage> msg);
+    status_t doSelectTrack(size_t trackIndex, bool select);
+
+    void onSeek(sp<AMessage> msg);
+    status_t doSeek(int64_t seekTimeUs);
 
     void onPrepareAsync();
 
@@ -129,10 +172,17 @@
             media_track_type trackType,
             int64_t *actualTimeUs = NULL);
 
+    void postReadBuffer(media_track_type trackType);
+    void onReadBuffer(sp<AMessage> msg);
     void readBuffer(
             media_track_type trackType,
             int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
 
+    void schedulePollBuffering();
+    void cancelPollBuffering();
+    void onPollBuffering();
+    void notifyBufferingUpdate(int percentage);
+
     DISALLOW_EVIL_CONSTRUCTORS(GenericSource);
 };
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 5e7ecfa..4a5d18a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -145,9 +145,12 @@
 NuPlayer::NuPlayer()
     : mUIDValid(false),
       mSourceFlags(0),
+      mCurrentPositionUs(0),
       mVideoIsAVC(false),
       mOffloadAudio(false),
       mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
+      mAudioDecoderGeneration(0),
+      mVideoDecoderGeneration(0),
       mAudioEOS(false),
       mVideoEOS(false),
       mScanSourcesPending(false),
@@ -278,7 +281,7 @@
         msg->setObject(
                 "native-window",
                 new NativeWindowWrapper(
-                    new Surface(bufferProducer)));
+                    new Surface(bufferProducer, true /* controlledByApp */)));
     }
 
     msg->post();
@@ -538,6 +541,14 @@
                         static_cast<NativeWindowWrapper *>(obj.get())));
 
             if (obj != NULL) {
+                if (mStarted && mVideoDecoder != NULL) {
+                    // Issue a seek to refresh the video screen only if started otherwise
+                    // the extractor may not yet be started and will assert.
+                    // If the video decoder is not set (perhaps audio only in this case)
+                    // do not perform a seek as it is not needed.
+                    mDeferredActions.push_back(new SeekAction(mCurrentPositionUs));
+                }
+
                 // If there is a new surface texture, instantiate decoders
                 // again if possible.
                 mDeferredActions.push_back(
@@ -691,6 +702,25 @@
         {
             bool audio = msg->what() == kWhatAudioNotify;
 
+            int32_t currentDecoderGeneration =
+                (audio? mAudioDecoderGeneration : mVideoDecoderGeneration);
+            int32_t requesterGeneration = currentDecoderGeneration - 1;
+            CHECK(msg->findInt32("generation", &requesterGeneration));
+
+            if (requesterGeneration != currentDecoderGeneration) {
+                ALOGV("got message from old %s decoder, generation(%d:%d)",
+                        audio ? "audio" : "video", requesterGeneration,
+                        currentDecoderGeneration);
+                sp<AMessage> reply;
+                if (!(msg->findMessage("reply", &reply))) {
+                    return;
+                }
+
+                reply->setInt32("err", INFO_DISCONTINUITY);
+                reply->post();
+                return;
+            }
+
             int32_t what;
             CHECK(msg->findInt32("what", &what));
 
@@ -735,7 +765,7 @@
                     ALOGV("initiating %s decoder shutdown",
                          audio ? "audio" : "video");
 
-                    (audio ? mAudioDecoder : mVideoDecoder)->initiateShutdown();
+                    getDecoder(audio)->initiateShutdown();
 
                     if (audio) {
                         mFlushingAudio = SHUTTING_DOWN_DECODER;
@@ -782,6 +812,14 @@
                     err = UNKNOWN_ERROR;
                 }
                 mRenderer->queueEOS(audio, err);
+                if (audio && mFlushingAudio != NONE) {
+                    mAudioDecoder.clear();
+                    mFlushingAudio = SHUT_DOWN;
+                } else if (!audio && mFlushingVideo != NONE){
+                    mVideoDecoder.clear();
+                    mFlushingVideo = SHUT_DOWN;
+                }
+                finishFlushIfPossible();
             } else if (what == Decoder::kWhatDrainThisBuffer) {
                 renderBuffer(audio, msg);
             } else {
@@ -831,6 +869,7 @@
             } else if (what == Renderer::kWhatPosition) {
                 int64_t positionUs;
                 CHECK(msg->findInt64("positionUs", &positionUs));
+                mCurrentPositionUs = positionUs;
 
                 CHECK(msg->findInt64("videoLateByUs", &mVideoLateByUs));
 
@@ -943,11 +982,13 @@
 }
 
 void NuPlayer::finishFlushIfPossible() {
-    if (mFlushingAudio != FLUSHED && mFlushingAudio != SHUT_DOWN) {
+    if (mFlushingAudio != NONE && mFlushingAudio != FLUSHED
+            && mFlushingAudio != SHUT_DOWN) {
         return;
     }
 
-    if (mFlushingVideo != FLUSHED && mFlushingVideo != SHUT_DOWN) {
+    if (mFlushingVideo != NONE && mFlushingVideo != FLUSHED
+            && mFlushingVideo != SHUT_DOWN) {
         return;
     }
 
@@ -958,11 +999,11 @@
         mTimeDiscontinuityPending = false;
     }
 
-    if (mAudioDecoder != NULL) {
+    if (mAudioDecoder != NULL && mFlushingAudio == FLUSHED) {
         mAudioDecoder->signalResume();
     }
 
-    if (mVideoDecoder != NULL) {
+    if (mVideoDecoder != NULL && mFlushingVideo == FLUSHED) {
         mVideoDecoder->signalResume();
     }
 
@@ -1061,6 +1102,7 @@
             }
             ALOGV("openAudioSink: try to open AudioSink in offload mode");
             flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+            flags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
             audioSinkChanged = true;
             mAudioSink->close();
             err = mAudioSink->open(
@@ -1150,17 +1192,21 @@
         }
     }
 
-    sp<AMessage> notify =
-        new AMessage(audio ? kWhatAudioNotify : kWhatVideoNotify,
-                     id());
-
     if (audio) {
+        sp<AMessage> notify = new AMessage(kWhatAudioNotify, id());
+        ++mAudioDecoderGeneration;
+        notify->setInt32("generation", mAudioDecoderGeneration);
+
         if (mOffloadAudio) {
             *decoder = new DecoderPassThrough(notify);
         } else {
             *decoder = new Decoder(notify);
         }
     } else {
+        sp<AMessage> notify = new AMessage(kWhatVideoNotify, id());
+        ++mVideoDecoderGeneration;
+        notify->setInt32("generation", mVideoDecoderGeneration);
+
         *decoder = new Decoder(notify, mNativeWindow);
     }
     (*decoder)->init();
@@ -1195,8 +1241,8 @@
     sp<AMessage> reply;
     CHECK(msg->findMessage("reply", &reply));
 
-    if ((audio && IsFlushingState(mFlushingAudio))
-            || (!audio && IsFlushingState(mFlushingVideo))) {
+    if ((audio && mFlushingAudio != NONE)
+            || (!audio && mFlushingVideo != NONE)) {
         reply->setInt32("err", INFO_DISCONTINUITY);
         reply->post();
         return OK;
@@ -1256,7 +1302,24 @@
                 mTimeDiscontinuityPending =
                     mTimeDiscontinuityPending || timeChange;
 
-                if (mFlushingAudio == NONE && mFlushingVideo == NONE) {
+                bool seamlessFormatChange = false;
+                sp<AMessage> newFormat = mSource->getFormat(audio);
+                if (formatChange) {
+                    seamlessFormatChange =
+                        getDecoder(audio)->supportsSeamlessFormatChange(newFormat);
+                    // treat seamless format change separately
+                    formatChange = !seamlessFormatChange;
+                }
+                bool shutdownOrFlush = formatChange || timeChange;
+
+                // We want to queue up scan-sources only once per discontinuity.
+                // We control this by doing it only if neither audio nor video are
+                // flushing or shutting down.  (After handling 1st discontinuity, one
+                // of the flushing states will not be NONE.)
+                // No need to scan sources if this discontinuity does not result
+                // in a flush or shutdown, as the flushing state will stay NONE.
+                if (mFlushingAudio == NONE && mFlushingVideo == NONE &&
+                        shutdownOrFlush) {
                     // And we'll resume scanning sources once we're done
                     // flushing.
                     mDeferredActions.push_front(
@@ -1264,27 +1327,19 @@
                                 &NuPlayer::performScanSources));
                 }
 
-                if (formatChange || timeChange) {
-
-                    sp<AMessage> newFormat = mSource->getFormat(audio);
-                    sp<Decoder> &decoder = audio ? mAudioDecoder : mVideoDecoder;
-                    if (formatChange && !decoder->supportsSeamlessFormatChange(newFormat)) {
-                        flushDecoder(audio, /* needShutdown = */ true);
-                    } else {
-                        flushDecoder(audio, /* needShutdown = */ false);
-                        err = OK;
-                    }
+                if (formatChange /* not seamless */) {
+                    // must change decoder
+                    flushDecoder(audio, /* needShutdown = */ true);
+                } else if (timeChange) {
+                    // need to flush
+                    flushDecoder(audio, /* needShutdown = */ false, newFormat);
+                    err = OK;
+                } else if (seamlessFormatChange) {
+                    // reuse existing decoder and don't flush
+                    updateDecoderFormatWithoutFlush(audio, newFormat);
+                    err = OK;
                 } else {
                     // This stream is unaffected by the discontinuity
-
-                    if (audio) {
-                        mFlushingAudio = FLUSHED;
-                    } else {
-                        mFlushingVideo = FLUSHED;
-                    }
-
-                    finishFlushIfPossible();
-
                     return -EWOULDBLOCK;
                 }
             }
@@ -1335,7 +1390,8 @@
     sp<AMessage> reply;
     CHECK(msg->findMessage("reply", &reply));
 
-    if (IsFlushingState(audio ? mFlushingAudio : mFlushingVideo)) {
+    if ((audio && mFlushingAudio != NONE)
+            || (!audio && mFlushingVideo != NONE)) {
         // We're currently attempting to flush the decoder, in order
         // to complete this, the decoder wants all its buffers back,
         // so we don't want any output buffers it sent us (from before
@@ -1460,50 +1516,57 @@
     driver->notifyListener(msg, ext1, ext2, in);
 }
 
-void NuPlayer::flushDecoder(bool audio, bool needShutdown) {
+void NuPlayer::flushDecoder(
+        bool audio, bool needShutdown, const sp<AMessage> &newFormat) {
     ALOGV("[%s] flushDecoder needShutdown=%d",
           audio ? "audio" : "video", needShutdown);
 
-    if ((audio && mAudioDecoder == NULL) || (!audio && mVideoDecoder == NULL)) {
+    const sp<Decoder> &decoder = getDecoder(audio);
+    if (decoder == NULL) {
         ALOGI("flushDecoder %s without decoder present",
              audio ? "audio" : "video");
+        return;
     }
 
     // Make sure we don't continue to scan sources until we finish flushing.
     ++mScanSourcesGeneration;
     mScanSourcesPending = false;
 
-    (audio ? mAudioDecoder : mVideoDecoder)->signalFlush();
+    decoder->signalFlush(newFormat);
     mRenderer->flush(audio);
 
     FlushStatus newStatus =
         needShutdown ? FLUSHING_DECODER_SHUTDOWN : FLUSHING_DECODER;
 
     if (audio) {
-        CHECK(mFlushingAudio == NONE
-                || mFlushingAudio == AWAITING_DISCONTINUITY);
-
+        ALOGE_IF(mFlushingAudio != NONE,
+                "audio flushDecoder() is called in state %d", mFlushingAudio);
         mFlushingAudio = newStatus;
-
-        if (mFlushingVideo == NONE) {
-            mFlushingVideo = (mVideoDecoder != NULL)
-                ? AWAITING_DISCONTINUITY
-                : FLUSHED;
-        }
     } else {
-        CHECK(mFlushingVideo == NONE
-                || mFlushingVideo == AWAITING_DISCONTINUITY);
-
+        ALOGE_IF(mFlushingVideo != NONE,
+                "video flushDecoder() is called in state %d", mFlushingVideo);
         mFlushingVideo = newStatus;
 
-        if (mFlushingAudio == NONE) {
-            mFlushingAudio = (mAudioDecoder != NULL)
-                ? AWAITING_DISCONTINUITY
-                : FLUSHED;
+        if (mCCDecoder != NULL) {
+            mCCDecoder->flush();
         }
     }
 }
 
+void NuPlayer::updateDecoderFormatWithoutFlush(
+        bool audio, const sp<AMessage> &format) {
+    ALOGV("[%s] updateDecoderFormatWithoutFlush", audio ? "audio" : "video");
+
+    const sp<Decoder> &decoder = getDecoder(audio);
+    if (decoder == NULL) {
+        ALOGI("updateDecoderFormatWithoutFlush %s without decoder present",
+             audio ? "audio" : "video");
+        return;
+    }
+
+    decoder->signalUpdateFormat(format);
+}
+
 void NuPlayer::queueDecoderShutdown(
         bool audio, bool video, const sp<AMessage> &reply) {
     ALOGI("queueDecoderShutdown audio=%d, video=%d", audio, video);
@@ -1590,18 +1653,6 @@
         // an intermediate state, i.e. one more more decoders are currently
         // flushing or shutting down.
 
-        if (mRenderer != NULL) {
-            // There's an edge case where the renderer owns all output
-            // buffers and is paused, therefore the decoder will not read
-            // more input data and will never encounter the matching
-            // discontinuity. To avoid this, we resume the renderer.
-
-            if (mFlushingAudio == AWAITING_DISCONTINUITY
-                    || mFlushingVideo == AWAITING_DISCONTINUITY) {
-                mRenderer->resume();
-            }
-        }
-
         if (mFlushingAudio != NONE || mFlushingVideo != NONE) {
             // We're currently flushing, postpone the reset until that's
             // completed.
@@ -1624,6 +1675,14 @@
           seekTimeUs,
           seekTimeUs / 1E6);
 
+    if (mSource == NULL) {
+        // This happens when reset occurs right before the loop mode
+        // asynchronously seeks to the start of the stream.
+        LOG_ALWAYS_FATAL_IF(mAudioDecoder != NULL || mVideoDecoder != NULL,
+                "mSource is NULL and decoders not NULL audio(%p) video(%p)",
+                mAudioDecoder.get(), mVideoDecoder.get());
+        return;
+    }
     mSource->seekTo(seekTimeUs);
     ++mTimedTextGeneration;
 
@@ -1666,14 +1725,6 @@
 
     mTimeDiscontinuityPending = true;
 
-    if (mFlushingAudio == NONE && (!audio || mAudioDecoder == NULL)) {
-        mFlushingAudio = FLUSHED;
-    }
-
-    if (mFlushingVideo == NONE && (!video || mVideoDecoder == NULL)) {
-        mFlushingVideo = FLUSHED;
-    }
-
     if (audio && mAudioDecoder != NULL) {
         flushDecoder(true /* audio */, true /* needShutdown */);
     }
@@ -1738,6 +1789,13 @@
 
     // XXX - ignore error from setVideoScalingMode for now
     setVideoScalingMode(mVideoScalingMode);
+
+    if (mDriver != NULL) {
+        sp<NuPlayerDriver> driver = mDriver.promote();
+        if (driver != NULL) {
+            driver->notifySetSurfaceComplete();
+        }
+    }
 }
 
 void NuPlayer::onSourceNotify(const sp<AMessage> &msg) {
@@ -1803,6 +1861,15 @@
             break;
         }
 
+        case Source::kWhatBufferingUpdate:
+        {
+            int32_t percentage;
+            CHECK(msg->findInt32("percentage", &percentage));
+
+            notifyListener(MEDIA_BUFFERING_UPDATE, percentage, 0);
+            break;
+        }
+
         case Source::kWhatBufferingStart:
         {
             notifyListener(MEDIA_INFO, MEDIA_INFO_BUFFERING_START, 0);
@@ -1870,6 +1937,12 @@
             break;
         }
 
+        case Source::kWhatDrmNoLicense:
+        {
+            notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE);
+            break;
+        }
+
         default:
             TRESPASS();
     }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 48882c5..0c7f531 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -25,6 +25,7 @@
 namespace android {
 
 struct ABuffer;
+struct AMessage;
 struct MetaData;
 struct NuPlayerDriver;
 
@@ -120,6 +121,7 @@
     sp<Source> mSource;
     uint32_t mSourceFlags;
     sp<NativeWindowWrapper> mNativeWindow;
+    int64_t mCurrentPositionUs;
     sp<MediaPlayerBase::AudioSink> mAudioSink;
     sp<Decoder> mVideoDecoder;
     bool mVideoIsAVC;
@@ -129,6 +131,8 @@
     sp<CCDecoder> mCCDecoder;
     sp<Renderer> mRenderer;
     sp<ALooper> mRendererLooper;
+    int32_t mAudioDecoderGeneration;
+    int32_t mVideoDecoderGeneration;
 
     List<sp<Action> > mDeferredActions;
 
@@ -143,7 +147,6 @@
 
     enum FlushStatus {
         NONE,
-        AWAITING_DISCONTINUITY,
         FLUSHING_DECODER,
         FLUSHING_DECODER_SHUTDOWN,
         SHUTTING_DOWN_DECODER,
@@ -168,6 +171,10 @@
 
     bool mStarted;
 
+    inline const sp<Decoder> &getDecoder(bool audio) {
+        return audio ? mAudioDecoder : mVideoDecoder;
+    }
+
     void openAudioSink(const sp<AMessage> &format, bool offloadOnly);
     void closeAudioSink();
 
@@ -184,7 +191,9 @@
 
     void finishFlushIfPossible();
 
-    void flushDecoder(bool audio, bool needShutdown);
+    void flushDecoder(
+            bool audio, bool needShutdown, const sp<AMessage> &newFormat = NULL);
+    void updateDecoderFormatWithoutFlush(bool audio, const sp<AMessage> &format);
 
     static bool IsFlushingState(FlushStatus state, bool *needShutdown = NULL);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 8fce2f4..8ce7baf 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -44,11 +44,11 @@
     // Every decoder has its own looper because MediaCodec operations
     // are blocking, but NuPlayer needs asynchronous operations.
     mDecoderLooper = new ALooper;
-    mDecoderLooper->setName("NuPlayerDecoder");
+    mDecoderLooper->setName("NPDecoder");
     mDecoderLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
 
     mCodecLooper = new ALooper;
-    mCodecLooper->setName("NuPlayerDecoder-MC");
+    mCodecLooper->setName("NPDecoder-CL");
     mCodecLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
 }
 
@@ -71,6 +71,19 @@
     return err;
 }
 
+void NuPlayer::Decoder::rememberCodecSpecificData(const sp<AMessage> &format) {
+    mCSDsForCurrentFormat.clear();
+    for (int32_t i = 0; ; ++i) {
+        AString tag = "csd-";
+        tag.append(i);
+        sp<ABuffer> buffer;
+        if (!format->findBuffer(tag.c_str(), &buffer)) {
+            break;
+        }
+        mCSDsForCurrentFormat.push(buffer);
+    }
+}
+
 void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) {
     CHECK(mCodec == NULL);
 
@@ -123,6 +136,8 @@
         handleError(err);
         return;
     }
+    rememberCodecSpecificData(format);
+
     // the following should work in configured state
     CHECK_EQ((status_t)OK, mCodec->getOutputFormat(&mOutputFormat));
     CHECK_EQ((status_t)OK, mCodec->getInputFormat(&mInputFormat));
@@ -189,6 +204,12 @@
     msg->post();
 }
 
+void NuPlayer::Decoder::signalUpdateFormat(const sp<AMessage> &format) {
+    sp<AMessage> msg = new AMessage(kWhatUpdateFormat, id());
+    msg->setMessage("format", format);
+    msg->post();
+}
+
 status_t NuPlayer::Decoder::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
     sp<AMessage> msg = new AMessage(kWhatGetInputBuffers, id());
     msg->setPointer("buffers", buffers);
@@ -199,6 +220,8 @@
 
 void NuPlayer::Decoder::handleError(int32_t err)
 {
+    mCodec->release();
+
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatError);
     notify->setInt32("err", err);
@@ -229,6 +252,15 @@
     reply->setSize("buffer-ix", bufferIx);
     reply->setInt32("generation", mBufferGeneration);
 
+    if (!mCSDsToSubmit.isEmpty()) {
+        sp<ABuffer> buffer = mCSDsToSubmit.itemAt(0);
+        ALOGI("[%s] resubmitting CSD", mComponentName.c_str());
+        reply->setBuffer("buffer", buffer);
+        mCSDsToSubmit.removeAt(0);
+        reply->post();
+        return true;
+    }
+
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatFillThisBuffer);
     notify->setBuffer("buffer", mInputBuffers[bufferIx]);
@@ -312,10 +344,12 @@
         uint32_t flags = 0;
         CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
 
-        int32_t eos;
-        // we do not expect CODECCONFIG or SYNCFRAME for decoder
+        int32_t eos, csd;
+        // we do not expect SYNCFRAME for decoder
         if (buffer->meta()->findInt32("eos", &eos) && eos) {
             flags |= MediaCodec::BUFFER_FLAG_EOS;
+        } else if (buffer->meta()->findInt32("csd", &csd) && csd) {
+            flags |= MediaCodec::BUFFER_FLAG_CODECCONFIG;
         }
 
         // copy into codec buffer
@@ -448,6 +482,7 @@
     status_t err = OK;
     if (mCodec != NULL) {
         err = mCodec->flush();
+        mCSDsToSubmit = mCSDsForCurrentFormat; // copy operator
         ++mBufferGeneration;
     }
 
@@ -478,10 +513,13 @@
 
         if (mNativeWindow != NULL) {
             // reconnect to surface as MediaCodec disconnected from it
-            CHECK_EQ((int)NO_ERROR,
+            status_t error =
                     native_window_api_connect(
                             mNativeWindow->getNativeWindow().get(),
-                            NATIVE_WINDOW_API_MEDIA));
+                            NATIVE_WINDOW_API_MEDIA);
+            ALOGW_IF(error != NO_ERROR,
+                    "[%s] failed to connect to native window, error=%d",
+                    mComponentName.c_str(), error);
         }
         mComponentName = "decoder";
     }
@@ -512,6 +550,14 @@
             break;
         }
 
+        case kWhatUpdateFormat:
+        {
+            sp<AMessage> format;
+            CHECK(msg->findMessage("format", &format));
+            rememberCodecSpecificData(format);
+            break;
+        }
+
         case kWhatGetInputBuffers:
         {
             uint32_t replyID;
@@ -563,6 +609,10 @@
 
         case kWhatFlush:
         {
+            sp<AMessage> format;
+            if (msg->findMessage("new-format", &format)) {
+                rememberCodecSpecificData(format);
+            }
             onFlush();
             break;
         }
@@ -585,8 +635,12 @@
     }
 }
 
-void NuPlayer::Decoder::signalFlush() {
-    (new AMessage(kWhatFlush, id()))->post();
+void NuPlayer::Decoder::signalFlush(const sp<AMessage> &format) {
+    sp<AMessage> msg = new AMessage(kWhatFlush, id());
+    if (format != NULL) {
+        msg->setMessage("new-format", format);
+    }
+    msg->post();
 }
 
 void NuPlayer::Decoder::signalResume() {
@@ -662,72 +716,28 @@
     return seamless;
 }
 
-struct NuPlayer::CCDecoder::CCData {
+struct CCData {
     CCData(uint8_t type, uint8_t data1, uint8_t data2)
         : mType(type), mData1(data1), mData2(data2) {
     }
+    bool getChannel(size_t *channel) const {
+        if (mData1 >= 0x10 && mData1 <= 0x1f) {
+            *channel = (mData1 >= 0x18 ? 1 : 0) + (mType ? 2 : 0);
+            return true;
+        }
+        return false;
+    }
 
     uint8_t mType;
     uint8_t mData1;
     uint8_t mData2;
 };
 
-NuPlayer::CCDecoder::CCDecoder(const sp<AMessage> &notify)
-    : mNotify(notify),
-      mTrackCount(0),
-      mSelectedTrack(-1) {
-}
-
-size_t NuPlayer::CCDecoder::getTrackCount() const {
-    return mTrackCount;
-}
-
-sp<AMessage> NuPlayer::CCDecoder::getTrackInfo(size_t index) const {
-    CHECK(index == 0);
-
-    sp<AMessage> format = new AMessage();
-
-    format->setInt32("type", MEDIA_TRACK_TYPE_SUBTITLE);
-    format->setString("language", "und");
-    format->setString("mime", MEDIA_MIMETYPE_TEXT_CEA_608);
-    format->setInt32("auto", 1);
-    format->setInt32("default", 1);
-    format->setInt32("forced", 0);
-
-    return format;
-}
-
-status_t NuPlayer::CCDecoder::selectTrack(size_t index, bool select) {
-    CHECK(index < mTrackCount);
-
-    if (select) {
-        if (mSelectedTrack == (ssize_t)index) {
-            ALOGE("track %zu already selected", index);
-            return BAD_VALUE;
-        }
-        ALOGV("selected track %zu", index);
-        mSelectedTrack = index;
-    } else {
-        if (mSelectedTrack != (ssize_t)index) {
-            ALOGE("track %zu is not selected", index);
-            return BAD_VALUE;
-        }
-        ALOGV("unselected track %zu", index);
-        mSelectedTrack = -1;
-    }
-
-    return OK;
-}
-
-bool NuPlayer::CCDecoder::isSelected() const {
-    return mSelectedTrack >= 0 && mSelectedTrack < (int32_t)mTrackCount;
-}
-
-bool NuPlayer::CCDecoder::isNullPad(CCData *cc) const {
+static bool isNullPad(CCData *cc) {
     return cc->mData1 < 0x10 && cc->mData2 < 0x10;
 }
 
-void NuPlayer::CCDecoder::dumpBytePair(const sp<ABuffer> &ccBuf) const {
+static void dumpBytePair(const sp<ABuffer> &ccBuf) {
     size_t offset = 0;
     AString out;
 
@@ -789,6 +799,78 @@
     ALOGI("%s", out.c_str());
 }
 
+NuPlayer::CCDecoder::CCDecoder(const sp<AMessage> &notify)
+    : mNotify(notify),
+      mCurrentChannel(0),
+      mSelectedTrack(-1) {
+      for (size_t i = 0; i < sizeof(mTrackIndices)/sizeof(mTrackIndices[0]); ++i) {
+          mTrackIndices[i] = -1;
+      }
+}
+
+size_t NuPlayer::CCDecoder::getTrackCount() const {
+    return mFoundChannels.size();
+}
+
+sp<AMessage> NuPlayer::CCDecoder::getTrackInfo(size_t index) const {
+    if (!isTrackValid(index)) {
+        return NULL;
+    }
+
+    sp<AMessage> format = new AMessage();
+
+    format->setInt32("type", MEDIA_TRACK_TYPE_SUBTITLE);
+    format->setString("language", "und");
+    format->setString("mime", MEDIA_MIMETYPE_TEXT_CEA_608);
+    // CC1: field 0, channel 0
+    bool isDefaultAuto = (mFoundChannels[index] == 0);
+    format->setInt32("auto", isDefaultAuto);
+    format->setInt32("default", isDefaultAuto);
+    format->setInt32("forced", 0);
+
+    return format;
+}
+
+status_t NuPlayer::CCDecoder::selectTrack(size_t index, bool select) {
+    if (!isTrackValid(index)) {
+        return BAD_VALUE;
+    }
+
+    if (select) {
+        if (mSelectedTrack == (ssize_t)index) {
+            ALOGE("track %zu already selected", index);
+            return BAD_VALUE;
+        }
+        ALOGV("selected track %zu", index);
+        mSelectedTrack = index;
+    } else {
+        if (mSelectedTrack != (ssize_t)index) {
+            ALOGE("track %zu is not selected", index);
+            return BAD_VALUE;
+        }
+        ALOGV("unselected track %zu", index);
+        mSelectedTrack = -1;
+    }
+
+    return OK;
+}
+
+bool NuPlayer::CCDecoder::isSelected() const {
+    return mSelectedTrack >= 0 && mSelectedTrack < (int32_t) getTrackCount();
+}
+
+bool NuPlayer::CCDecoder::isTrackValid(size_t index) const {
+    return index < getTrackCount();
+}
+
+int32_t NuPlayer::CCDecoder::getTrackIndex(size_t channel) const {
+    if (channel < sizeof(mTrackIndices)/sizeof(mTrackIndices[0])) {
+        return mTrackIndices[channel];
+    }
+    return -1;
+}
+
+// returns true if a new CC track is found
 bool NuPlayer::CCDecoder::extractFromSEI(const sp<ABuffer> &accessUnit) {
     int64_t timeUs;
     CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
@@ -798,7 +880,7 @@
         return false;
     }
 
-    bool hasCC = false;
+    bool trackAdded = false;
 
     NALBitReader br(sei->data() + 1, sei->size() - 1);
     // sei_message()
@@ -833,8 +915,6 @@
                     && itu_t_t35_provider_code == 0x0031
                     && user_identifier == 'GA94'
                     && user_data_type_code == 0x3) {
-                hasCC = true;
-
                 // MPEG_cc_data()
                 // ATSC A/53 Part 4: 6.2.3.1
                 br.skipBits(1); //process_em_data_flag
@@ -864,6 +944,12 @@
                                 && (cc_type == 0 || cc_type == 1)) {
                             CCData cc(cc_type, cc_data_1, cc_data_2);
                             if (!isNullPad(&cc)) {
+                                size_t channel;
+                                if (cc.getChannel(&channel) && getTrackIndex(channel) < 0) {
+                                    mTrackIndices[channel] = mFoundChannels.size();
+                                    mFoundChannels.push_back(channel);
+                                    trackAdded = true;
+                                }
                                 memcpy(ccBuf->data() + ccBuf->size(),
                                         (void *)&cc, sizeof(cc));
                                 ccBuf->setRange(0, ccBuf->size() + sizeof(CCData));
@@ -886,13 +972,33 @@
         br.skipBits(payload_size * 8);
     }
 
-    return hasCC;
+    return trackAdded;
+}
+
+sp<ABuffer> NuPlayer::CCDecoder::filterCCBuf(
+        const sp<ABuffer> &ccBuf, size_t index) {
+    sp<ABuffer> filteredCCBuf = new ABuffer(ccBuf->size());
+    filteredCCBuf->setRange(0, 0);
+
+    size_t cc_count = ccBuf->size() / sizeof(CCData);
+    const CCData* cc_data = (const CCData*)ccBuf->data();
+    for (size_t i = 0; i < cc_count; ++i) {
+        size_t channel;
+        if (cc_data[i].getChannel(&channel)) {
+            mCurrentChannel = channel;
+        }
+        if (mCurrentChannel == mFoundChannels[index]) {
+            memcpy(filteredCCBuf->data() + filteredCCBuf->size(),
+                    (void *)&cc_data[i], sizeof(CCData));
+            filteredCCBuf->setRange(0, filteredCCBuf->size() + sizeof(CCData));
+        }
+    }
+
+    return filteredCCBuf;
 }
 
 void NuPlayer::CCDecoder::decode(const sp<ABuffer> &accessUnit) {
-    if (extractFromSEI(accessUnit) && mTrackCount == 0) {
-        mTrackCount++;
-
+    if (extractFromSEI(accessUnit)) {
         ALOGI("Found CEA-608 track");
         sp<AMessage> msg = mNotify->dup();
         msg->setInt32("what", kWhatTrackAdded);
@@ -902,13 +1008,18 @@
 }
 
 void NuPlayer::CCDecoder::display(int64_t timeUs) {
+    if (!isTrackValid(mSelectedTrack)) {
+        ALOGE("Could not find current track(index=%d)", mSelectedTrack);
+        return;
+    }
+
     ssize_t index = mCCMap.indexOfKey(timeUs);
     if (index < 0) {
         ALOGV("cc for timestamp %" PRId64 " not found", timeUs);
         return;
     }
 
-    sp<ABuffer> &ccBuf = mCCMap.editValueAt(index);
+    sp<ABuffer> ccBuf = filterCCBuf(mCCMap.valueAt(index), mSelectedTrack);
 
     if (ccBuf->size() > 0) {
 #if 0
@@ -929,5 +1040,9 @@
     mCCMap.removeItemsAt(0, index + 1);
 }
 
+void NuPlayer::CCDecoder::flush() {
+    mCCMap.clear();
+}
+
 }  // namespace android
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index c6fc237..cc1bdff 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -36,7 +36,8 @@
     virtual void init();
 
     status_t getInputBuffers(Vector<sp<ABuffer> > *dstBuffers) const;
-    virtual void signalFlush();
+    virtual void signalFlush(const sp<AMessage> &format = NULL);
+    virtual void signalUpdateFormat(const sp<AMessage> &format);
     virtual void signalResume();
     virtual void initiateShutdown();
 
@@ -67,6 +68,7 @@
         kWhatRenderBuffer       = 'rndr',
         kWhatFlush              = 'flus',
         kWhatShutdown           = 'shuD',
+        kWhatUpdateFormat       = 'uFmt',
     };
 
     sp<AMessage> mNotify;
@@ -80,6 +82,8 @@
 
     Vector<sp<ABuffer> > mInputBuffers;
     Vector<sp<ABuffer> > mOutputBuffers;
+    Vector<sp<ABuffer> > mCSDsForCurrentFormat;
+    Vector<sp<ABuffer> > mCSDsToSubmit;
     Vector<bool> mInputBufferIsDequeued;
     Vector<MediaBuffer *> mMediaBuffers;
 
@@ -103,6 +107,7 @@
     AString mComponentName;
 
     bool supportsSeamlessAudioFormatChange(const sp<AMessage> &targetFormat) const;
+    void rememberCodecSpecificData(const sp<AMessage> &format);
 
     DISALLOW_EVIL_CONSTRUCTORS(Decoder);
 };
@@ -121,18 +126,20 @@
     bool isSelected() const;
     void decode(const sp<ABuffer> &accessUnit);
     void display(int64_t timeUs);
+    void flush();
 
 private:
-    struct CCData;
-
     sp<AMessage> mNotify;
     KeyedVector<int64_t, sp<ABuffer> > mCCMap;
-    size_t mTrackCount;
+    size_t mCurrentChannel;
     int32_t mSelectedTrack;
+    int32_t mTrackIndices[4];
+    Vector<size_t> mFoundChannels;
 
-    bool isNullPad(CCData *cc) const;
-    void dumpBytePair(const sp<ABuffer> &ccBuf) const;
+    bool isTrackValid(size_t index) const;
+    int32_t getTrackIndex(size_t channel) const;
     bool extractFromSEI(const sp<ABuffer> &accessUnit);
+    sp<ABuffer> filterCCBuf(const sp<ABuffer> &ccBuf, size_t index);
 
     DISALLOW_EVIL_CONSTRUCTORS(CCDecoder);
 };
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index bf7542f..09324ae 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -34,8 +34,11 @@
     : mState(STATE_IDLE),
       mIsAsyncPrepare(false),
       mAsyncResult(UNKNOWN_ERROR),
+      mSetSurfaceInProgress(false),
       mDurationUs(-1),
       mPositionUs(-1),
+      mNotifyTimeRealUs(-1),
+      mPauseStartedTimeUs(-1),
       mNumFramesTotal(0),
       mNumFramesDropped(0),
       mLooper(new ALooper),
@@ -134,6 +137,10 @@
         const sp<IGraphicBufferProducer> &bufferProducer) {
     Mutex::Autolock autoLock(mLock);
 
+    if (mSetSurfaceInProgress) {
+        return INVALID_OPERATION;
+    }
+
     switch (mState) {
         case STATE_SET_DATASOURCE_PENDING:
         case STATE_RESET_IN_PROGRESS:
@@ -143,8 +150,14 @@
             break;
     }
 
+    mSetSurfaceInProgress = true;
+
     mPlayer->setVideoSurfaceTextureAsync(bufferProducer);
 
+    while (mSetSurfaceInProgress) {
+        mCondition.wait(mLock);
+    }
+
     return OK;
 }
 
@@ -167,6 +180,16 @@
                 mCondition.wait(mLock);
             }
             return (mState == STATE_PREPARED) ? OK : UNKNOWN_ERROR;
+        case STATE_STOPPED:
+            // this is really just paused. handle as seek to start
+            mAtEOS = false;
+            mState = STATE_STOPPED_AND_PREPARING;
+            mIsAsyncPrepare = false;
+            mPlayer->seekToAsync(0);
+            while (mState == STATE_STOPPED_AND_PREPARING) {
+                mCondition.wait(mLock);
+            }
+            return (mState == STATE_STOPPED_AND_PREPARED) ? OK : UNKNOWN_ERROR;
         default:
             return INVALID_OPERATION;
     };
@@ -181,6 +204,13 @@
             mIsAsyncPrepare = true;
             mPlayer->prepareAsync();
             return OK;
+        case STATE_STOPPED:
+            // this is really just paused. handle as seek to start
+            mAtEOS = false;
+            mState = STATE_STOPPED_AND_PREPARING;
+            mIsAsyncPrepare = true;
+            mPlayer->seekToAsync(0);
+            return OK;
         default:
             return INVALID_OPERATION;
     };
@@ -210,7 +240,7 @@
 
             if (mStartupSeekTimeUs >= 0) {
                 if (mStartupSeekTimeUs == 0) {
-                    notifySeekComplete();
+                    notifySeekComplete_l();
                 } else {
                     mPlayer->seekToAsync(mStartupSeekTimeUs);
                 }
@@ -221,11 +251,20 @@
         }
 
         case STATE_RUNNING:
+        {
+            if (mAtEOS) {
+                mPlayer->seekToAsync(0);
+                mAtEOS = false;
+                mPositionUs = -1;
+            }
             break;
+        }
 
         case STATE_PAUSED:
+        case STATE_STOPPED_AND_PREPARED:
         {
             mPlayer->resume();
+            mPositionUs -= ALooper::GetNowUs() - mPauseStartedTimeUs;
             break;
         }
 
@@ -234,12 +273,37 @@
     }
 
     mState = STATE_RUNNING;
+    mPauseStartedTimeUs = -1;
 
     return OK;
 }
 
 status_t NuPlayerDriver::stop() {
-    return pause();
+    Mutex::Autolock autoLock(mLock);
+
+    switch (mState) {
+        case STATE_RUNNING:
+            mPlayer->pause();
+            // fall through
+
+        case STATE_PAUSED:
+            mState = STATE_STOPPED;
+            notifyListener_l(MEDIA_STOPPED);
+            break;
+
+        case STATE_PREPARED:
+        case STATE_STOPPED:
+        case STATE_STOPPED_AND_PREPARING:
+        case STATE_STOPPED_AND_PREPARED:
+            mState = STATE_STOPPED;
+            break;
+
+        default:
+            return INVALID_OPERATION;
+    }
+    setPauseStartedTimeIfNeeded();
+
+    return OK;
 }
 
 status_t NuPlayerDriver::pause() {
@@ -251,7 +315,9 @@
             return OK;
 
         case STATE_RUNNING:
-            notifyListener(MEDIA_PAUSED);
+            setPauseStartedTimeIfNeeded();
+            mState = STATE_PAUSED;
+            notifyListener_l(MEDIA_PAUSED);
             mPlayer->pause();
             break;
 
@@ -259,8 +325,6 @@
             return INVALID_OPERATION;
     }
 
-    mState = STATE_PAUSED;
-
     return OK;
 }
 
@@ -280,7 +344,7 @@
             // pretend that the seek completed. It will actually happen when starting playback.
             // TODO: actually perform the seek here, so the player is ready to go at the new
             // location
-            notifySeekComplete();
+            notifySeekComplete_l();
             break;
         }
 
@@ -289,7 +353,7 @@
         {
             mAtEOS = false;
             // seeks can take a while, so we essentially paused
-            notifyListener(MEDIA_PAUSED);
+            notifyListener_l(MEDIA_PAUSED);
             mPlayer->seekToAsync(seekTimeUs);
             break;
         }
@@ -298,6 +362,8 @@
             return INVALID_OPERATION;
     }
 
+    mPositionUs = seekTimeUs;
+    mNotifyTimeRealUs = -1;
     return OK;
 }
 
@@ -306,8 +372,12 @@
 
     if (mPositionUs < 0) {
         *msec = 0;
+    } else if (mNotifyTimeRealUs == -1) {
+        *msec = mPositionUs / 1000;
     } else {
-        *msec = (mPositionUs + 500ll) / 1000;
+        int64_t nowUs =
+                (isPlaying() ?  ALooper::GetNowUs() : mPauseStartedTimeUs);
+        *msec = (mPositionUs + nowUs - mNotifyTimeRealUs + 500ll) / 1000;
     }
 
     return OK;
@@ -340,7 +410,7 @@
         {
             CHECK(mIsAsyncPrepare);
 
-            notifyListener(MEDIA_PREPARED);
+            notifyListener_l(MEDIA_PREPARED);
             break;
         }
 
@@ -348,7 +418,9 @@
             break;
     }
 
-    notifyListener(MEDIA_STOPPED);
+    if (mState != STATE_STOPPED) {
+        notifyListener_l(MEDIA_STOPPED);
+    }
 
     mState = STATE_RESET_IN_PROGRESS;
     mPlayer->resetAsync();
@@ -472,6 +544,15 @@
     mCondition.broadcast();
 }
 
+void NuPlayerDriver::notifySetSurfaceComplete() {
+    Mutex::Autolock autoLock(mLock);
+
+    CHECK(mSetSurfaceInProgress);
+    mSetSurfaceInProgress = false;
+
+    mCondition.broadcast();
+}
+
 void NuPlayerDriver::notifyDuration(int64_t durationUs) {
     Mutex::Autolock autoLock(mLock);
     mDurationUs = durationUs;
@@ -479,11 +560,32 @@
 
 void NuPlayerDriver::notifyPosition(int64_t positionUs) {
     Mutex::Autolock autoLock(mLock);
-    mPositionUs = positionUs;
+    if (isPlaying()) {
+        mPositionUs = positionUs;
+        mNotifyTimeRealUs = ALooper::GetNowUs();
+    }
 }
 
 void NuPlayerDriver::notifySeekComplete() {
-    notifyListener(MEDIA_SEEK_COMPLETE);
+    Mutex::Autolock autoLock(mLock);
+    notifySeekComplete_l();
+}
+
+void NuPlayerDriver::notifySeekComplete_l() {
+    bool wasSeeking = true;
+    if (mState == STATE_STOPPED_AND_PREPARING) {
+        wasSeeking = false;
+        mState = STATE_STOPPED_AND_PREPARED;
+        mCondition.broadcast();
+        if (!mIsAsyncPrepare) {
+            // if we are preparing synchronously, no need to notify listener
+            return;
+        }
+    } else if (mState == STATE_STOPPED) {
+        // no need to notify listener
+        return;
+    }
+    notifyListener_l(wasSeeking ? MEDIA_SEEK_COMPLETE : MEDIA_PREPARED);
 }
 
 void NuPlayerDriver::notifyFrameStats(
@@ -515,11 +617,19 @@
 
 void NuPlayerDriver::notifyListener(
         int msg, int ext1, int ext2, const Parcel *in) {
+    Mutex::Autolock autoLock(mLock);
+    notifyListener_l(msg, ext1, ext2, in);
+}
+
+void NuPlayerDriver::notifyListener_l(
+        int msg, int ext1, int ext2, const Parcel *in) {
     switch (msg) {
         case MEDIA_PLAYBACK_COMPLETE:
         {
-            if (mLooping) {
+            if (mLooping && mState != STATE_RESET_IN_PROGRESS) {
+                mLock.unlock();
                 mPlayer->seekToAsync(0);
+                mLock.lock();
                 break;
             }
             // fall through
@@ -528,6 +638,7 @@
         case MEDIA_ERROR:
         {
             mAtEOS = true;
+            setPauseStartedTimeIfNeeded();
             break;
         }
 
@@ -535,7 +646,9 @@
             break;
     }
 
+    mLock.unlock();
     sendEvent(msg, ext1, ext2, in);
+    mLock.lock();
 }
 
 void NuPlayerDriver::notifySetDataSourceCompleted(status_t err) {
@@ -564,15 +677,17 @@
     mAsyncResult = err;
 
     if (err == OK) {
-        if (mIsAsyncPrepare) {
-            notifyListener(MEDIA_PREPARED);
-        }
+        // update state before notifying client, so that if client calls back into NuPlayerDriver
+        // in response, NuPlayerDriver has the right state
         mState = STATE_PREPARED;
-    } else {
         if (mIsAsyncPrepare) {
-            notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+            notifyListener_l(MEDIA_PREPARED);
         }
+    } else {
         mState = STATE_UNPREPARED;
+        if (mIsAsyncPrepare) {
+            notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+        }
     }
 
     mCondition.broadcast();
@@ -584,4 +699,10 @@
     mPlayerFlags = flags;
 }
 
+void NuPlayerDriver::setPauseStartedTimeIfNeeded() {
+    if (mPauseStartedTimeUs == -1) {
+        mPauseStartedTimeUs = ALooper::GetNowUs();
+    }
+}
+
 }  // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index a9ff8b6..e81d605 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -66,9 +66,11 @@
     void notifySetDataSourceCompleted(status_t err);
     void notifyPrepareCompleted(status_t err);
     void notifyResetComplete();
+    void notifySetSurfaceComplete();
     void notifyDuration(int64_t durationUs);
     void notifyPosition(int64_t positionUs);
     void notifySeekComplete();
+    void notifySeekComplete_l();
     void notifyFrameStats(int64_t numFramesTotal, int64_t numFramesDropped);
     void notifyListener(int msg, int ext1 = 0, int ext2 = 0, const Parcel *in = NULL);
     void notifyFlagsChanged(uint32_t flags);
@@ -86,6 +88,9 @@
         STATE_RUNNING,
         STATE_PAUSED,
         STATE_RESET_IN_PROGRESS,
+        STATE_STOPPED,                  // equivalent to PAUSED
+        STATE_STOPPED_AND_PREPARING,    // equivalent to PAUSED, but seeking
+        STATE_STOPPED_AND_PREPARED,     // equivalent to PAUSED, but seek complete
     };
 
     mutable Mutex mLock;
@@ -98,8 +103,11 @@
 
     // The following are protected through "mLock"
     // >>>
+    bool mSetSurfaceInProgress;
     int64_t mDurationUs;
     int64_t mPositionUs;
+    int64_t mNotifyTimeRealUs;
+    int64_t mPauseStartedTimeUs;
     int64_t mNumFramesTotal;
     int64_t mNumFramesDropped;
     // <<<
@@ -114,6 +122,8 @@
     int64_t mStartupSeekTimeUs;
 
     status_t prepare_l();
+    void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0, const Parcel *in = NULL);
+    void setPauseStartedTimeIfNeeded();
 
     DISALLOW_EVIL_CONSTRUCTORS(NuPlayerDriver);
 };
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 3640038..aad6e93 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -58,7 +58,8 @@
       mVideoRenderingStartGeneration(0),
       mAudioRenderingStartGeneration(0),
       mLastPositionUpdateUs(-1ll),
-      mVideoLateByUs(0ll) {
+      mVideoLateByUs(0ll),
+      mVideoSampleReceived(false) {
 }
 
 NuPlayer::Renderer::~Renderer() {
@@ -93,10 +94,14 @@
     {
         Mutex::Autolock autoLock(mFlushLock);
         if (audio) {
-            CHECK(!mFlushingAudio);
+            if (mFlushingAudio) {
+                return;
+            }
             mFlushingAudio = true;
         } else {
-            CHECK(!mFlushingVideo);
+            if (mFlushingVideo) {
+                return;
+            }
             mFlushingVideo = true;
         }
     }
@@ -115,6 +120,14 @@
     mSyncQueues = false;
 }
 
+void NuPlayer::Renderer::signalAudioSinkChanged() {
+    (new AMessage(kWhatAudioSinkChanged, id()))->post();
+}
+
+void NuPlayer::Renderer::signalDisableOffloadAudio() {
+    (new AMessage(kWhatDisableOffloadAudio, id()))->post();
+}
+
 void NuPlayer::Renderer::pause() {
     (new AMessage(kWhatPause, id()))->post();
 }
@@ -251,14 +264,6 @@
     msg->post(delayUs);
 }
 
-void NuPlayer::Renderer::signalAudioSinkChanged() {
-    (new AMessage(kWhatAudioSinkChanged, id()))->post();
-}
-
-void NuPlayer::Renderer::signalDisableOffloadAudio() {
-    (new AMessage(kWhatDisableOffloadAudio, id()))->post();
-}
-
 void NuPlayer::Renderer::prepareForMediaRenderingStart() {
     mAudioRenderingStartGeneration = mAudioQueueGeneration;
     mVideoRenderingStartGeneration = mVideoQueueGeneration;
@@ -311,13 +316,14 @@
 size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
     Mutex::Autolock autoLock(mLock);
 
-    if (!offloadingAudio()) {
+    if (!offloadingAudio() || mPaused) {
         return 0;
     }
 
     bool hasEOS = false;
 
     size_t sizeCopied = 0;
+    bool firstEntry = true;
     while (sizeCopied < size && !mAudioQueue.empty()) {
         QueueEntry *entry = &*mAudioQueue.begin();
 
@@ -328,14 +334,14 @@
             break;
         }
 
-        if (entry->mOffset == 0) {
+        if (firstEntry && entry->mOffset == 0) {
+            firstEntry = false;
             int64_t mediaTimeUs;
             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
             ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
             if (mFirstAudioTimeUs == -1) {
                 mFirstAudioTimeUs = mediaTimeUs;
             }
-            mAnchorTimeMediaUs = mediaTimeUs;
 
             uint32_t numFramesPlayed;
             CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
@@ -405,8 +411,11 @@
 
         if (entry->mBuffer == NULL) {
             // EOS
-
-            notifyEOS(true /* audio */, entry->mFinalResult);
+            int64_t postEOSDelayUs = 0;
+            if (mAudioSink->needsTrailingPadding()) {
+                postEOSDelayUs = getAudioPendingPlayoutUs() + 1000 * mAudioSink->latency();
+            }
+            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
 
             mAudioQueue.erase(mAudioQueue.begin());
             entry = NULL;
@@ -416,26 +425,11 @@
         if (entry->mOffset == 0) {
             int64_t mediaTimeUs;
             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
-
             ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
-
             mAnchorTimeMediaUs = mediaTimeUs;
 
-            uint32_t numFramesPlayed;
-            CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
-
-            uint32_t numFramesPendingPlayout =
-                mNumFramesWritten - numFramesPlayed;
-
-            int64_t realTimeOffsetUs =
-                (mAudioSink->latency() / 2  /* XXX */
-                    + numFramesPendingPlayout
-                        * mAudioSink->msecsPerFrame()) * 1000ll;
-
-            // ALOGI("realTimeOffsetUs = %lld us", realTimeOffsetUs);
-
-            mAnchorTimeRealUs =
-                ALooper::GetNowUs() + realTimeOffsetUs;
+            mAnchorTimeRealUs = ALooper::GetNowUs()
+                    + getAudioPendingPlayoutUs() + 1000 * mAudioSink->latency() / 2;
         }
 
         size_t copy = entry->mBuffer->size() - entry->mOffset;
@@ -443,11 +437,13 @@
             copy = numBytesAvailableToWrite;
         }
 
-        CHECK_EQ(mAudioSink->write(
-                    entry->mBuffer->data() + entry->mOffset, copy),
-                 (ssize_t)copy);
+        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset, copy);
+        if (written < 0) {
+            // An error in AudioSink write is fatal here.
+            LOG_ALWAYS_FATAL("AudioSink write error(%zd) when writing %zu bytes", written, copy);
+        }
 
-        entry->mOffset += copy;
+        entry->mOffset += written;
         if (entry->mOffset == entry->mBuffer->size()) {
             entry->mNotifyConsumed->post();
             mAudioQueue.erase(mAudioQueue.begin());
@@ -455,20 +451,50 @@
             entry = NULL;
         }
 
-        numBytesAvailableToWrite -= copy;
-        size_t copiedFrames = copy / mAudioSink->frameSize();
+        numBytesAvailableToWrite -= written;
+        size_t copiedFrames = written / mAudioSink->frameSize();
         mNumFramesWritten += copiedFrames;
 
         notifyIfMediaRenderingStarted();
-    }
 
+        if (written != (ssize_t)copy) {
+            // A short count was received from AudioSink::write()
+            //
+            // AudioSink write should block until exactly the requested number of bytes is delivered.
+            // But it may return with a short count (without an error) when:
+            //
+            // 1) Size to be copied is not a multiple of the frame size. We consider this fatal.
+            // 2) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
+
+            // (Case 1)
+            // The copy size must be a multiple of the frame size; if it is not, fail,
+            // as we should not carry over fractional frames between calls.
+            CHECK_EQ(copy % mAudioSink->frameSize(), 0);
+
+            // (Case 2)
+            // Return early to the caller.
+            // Beware of calling immediately again as this may busy-loop if you are not careful.
+            ALOGW("AudioSink write short frame count %zd < %zu", written, copy);
+            break;
+        }
+    }
     notifyPosition();
 
     return !mAudioQueue.empty();
 }
 
+int64_t NuPlayer::Renderer::getAudioPendingPlayoutUs() {
+    uint32_t numFramesPlayed;
+    CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
+
+    uint32_t numFramesPendingPlayout = mNumFramesWritten - numFramesPlayed;
+    return numFramesPendingPlayout * mAudioSink->msecsPerFrame() * 1000;
+}
+
 void NuPlayer::Renderer::postDrainVideoQueue() {
-    if (mDrainVideoQueuePending || mSyncQueues || mPaused) {
+    if (mDrainVideoQueuePending
+            || mSyncQueues
+            || (mPaused && mVideoSampleReceived)) {
         return;
     }
 
@@ -547,16 +573,22 @@
         realTimeUs = mediaTimeUs - mAnchorTimeMediaUs + mAnchorTimeRealUs;
     }
 
-    mVideoLateByUs = ALooper::GetNowUs() - realTimeUs;
-    bool tooLate = (mVideoLateByUs > 40000);
+    bool tooLate = false;
 
-    if (tooLate) {
-        ALOGV("video late by %lld us (%.2f secs)",
-             mVideoLateByUs, mVideoLateByUs / 1E6);
+    if (!mPaused) {
+        mVideoLateByUs = ALooper::GetNowUs() - realTimeUs;
+        tooLate = (mVideoLateByUs > 40000);
+
+        if (tooLate) {
+            ALOGV("video late by %lld us (%.2f secs)",
+                 mVideoLateByUs, mVideoLateByUs / 1E6);
+        } else {
+            ALOGV("rendering video at media time %.2f secs",
+                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
+                    (realTimeUs + mAnchorTimeMediaUs - mAnchorTimeRealUs)) / 1E6);
+        }
     } else {
-        ALOGV("rendering video at media time %.2f secs",
-                (mFlags & FLAG_REAL_TIME ? realTimeUs :
-                (realTimeUs + mAnchorTimeMediaUs - mAnchorTimeRealUs)) / 1E6);
+        mVideoLateByUs = 0ll;
     }
 
     entry->mNotifyConsumed->setInt32("render", !tooLate);
@@ -564,12 +596,15 @@
     mVideoQueue.erase(mVideoQueue.begin());
     entry = NULL;
 
-    if (!mVideoRenderingStarted) {
-        mVideoRenderingStarted = true;
-        notifyVideoRenderingStart();
-    }
+    mVideoSampleReceived = true;
 
-    notifyIfMediaRenderingStarted();
+    if (!mPaused) {
+        if (!mVideoRenderingStarted) {
+            mVideoRenderingStarted = true;
+            notifyVideoRenderingStart();
+        }
+        notifyIfMediaRenderingStarted();
+    }
 
     notifyPosition();
 }
@@ -580,12 +615,12 @@
     notify->post();
 }
 
-void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult) {
+void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatEOS);
     notify->setInt32("audio", static_cast<int32_t>(audio));
     notify->setInt32("finalResult", finalResult);
-    notify->post();
+    notify->post(delayUs);
 }
 
 void NuPlayer::Renderer::notifyAudioOffloadTearDown() {
@@ -716,6 +751,15 @@
     int32_t audio;
     CHECK(msg->findInt32("audio", &audio));
 
+    {
+        Mutex::Autolock autoLock(mFlushLock);
+        if (audio) {
+            mFlushingAudio = false;
+        } else {
+            mFlushingVideo = false;
+        }
+    }
+
     // If we're currently syncing the queues, i.e. dropping audio while
     // aligning the first audio/video buffer times and only one of the
     // two queues has data, we may starve that queue by not requesting
@@ -734,17 +778,18 @@
         {
             Mutex::Autolock autoLock(mLock);
             flushQueue(&mAudioQueue);
+
+            ++mAudioQueueGeneration;
+            prepareForMediaRenderingStart();
+
+            if (offloadingAudio()) {
+                mFirstAudioTimeUs = -1;
+            }
         }
 
-        Mutex::Autolock autoLock(mFlushLock);
-        mFlushingAudio = false;
-
         mDrainAudioQueuePending = false;
-        ++mAudioQueueGeneration;
 
-        prepareForMediaRenderingStart();
         if (offloadingAudio()) {
-            mFirstAudioTimeUs = -1;
             mAudioSink->pause();
             mAudioSink->flush();
             mAudioSink->start();
@@ -752,15 +797,13 @@
     } else {
         flushQueue(&mVideoQueue);
 
-        Mutex::Autolock autoLock(mFlushLock);
-        mFlushingVideo = false;
-
         mDrainVideoQueuePending = false;
         ++mVideoQueueGeneration;
 
         prepareForMediaRenderingStart();
     }
 
+    mVideoSampleReceived = false;
     notifyFlushComplete(audio);
 }
 
@@ -852,13 +895,16 @@
 void NuPlayer::Renderer::onPause() {
     CHECK(!mPaused);
 
+    {
+        Mutex::Autolock autoLock(mLock);
+        ++mAudioQueueGeneration;
+        ++mVideoQueueGeneration;
+        prepareForMediaRenderingStart();
+        mPaused = true;
+    }
+
     mDrainAudioQueuePending = false;
-    ++mAudioQueueGeneration;
-
     mDrainVideoQueuePending = false;
-    ++mVideoQueueGeneration;
-
-    prepareForMediaRenderingStart();
 
     if (mHasAudio) {
         mAudioSink->pause();
@@ -866,8 +912,6 @@
 
     ALOGV("now paused audio queue has %d entries, video has %d entries",
           mAudioQueue.size(), mVideoQueue.size());
-
-    mPaused = true;
 }
 
 void NuPlayer::Renderer::onResume() {
@@ -879,9 +923,9 @@
         mAudioSink->start();
     }
 
+    Mutex::Autolock autoLock(mLock);
     mPaused = false;
 
-    Mutex::Autolock autoLock(mLock);
     if (!mAudioQueue.empty()) {
         postDrainAudioQueue_l();
     }
@@ -895,7 +939,12 @@
     uint32_t numFramesPlayed;
     CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
 
-    int64_t currentPositionUs = mFirstAudioTimeUs
+    int64_t firstAudioTimeUs;
+    {
+        Mutex::Autolock autoLock(mLock);
+        firstAudioTimeUs = mFirstAudioTimeUs;
+    }
+    int64_t currentPositionUs = firstAudioTimeUs
             + (numFramesPlayed * mAudioSink->msecsPerFrame()) * 1000ll;
 
     mAudioSink->stop();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 1cba1a0..5c7d2d7 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -119,6 +119,7 @@
     bool mSyncQueues;
 
     bool mPaused;
+    bool mVideoSampleReceived;
     bool mVideoRenderingStarted;
     int32_t mVideoRenderingStartGeneration;
     int32_t mAudioRenderingStartGeneration;
@@ -129,6 +130,7 @@
     size_t fillAudioBuffer(void *buffer, size_t size);
 
     bool onDrainAudioQueue();
+    int64_t getAudioPendingPlayoutUs();
     void postDrainAudioQueue_l(int64_t delayUs = 0);
 
     void onDrainVideoQueue();
@@ -146,7 +148,7 @@
     void onResume();
     void onAudioOffloadTearDown();
 
-    void notifyEOS(bool audio, status_t finalResult);
+    void notifyEOS(bool audio, status_t finalResult, int64_t delayUs = 0);
     void notifyFlushComplete(bool audio);
     void notifyPosition();
     void notifyVideoLateBy(int64_t lateByUs);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 74892b6..7ccf3b1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -45,11 +45,13 @@
         kWhatPrepared,
         kWhatFlagsChanged,
         kWhatVideoSizeChanged,
+        kWhatBufferingUpdate,
         kWhatBufferingStart,
         kWhatBufferingEnd,
         kWhatSubtitleData,
         kWhatTimedTextData,
         kWhatQueueDecoderShutdown,
+        kWhatDrmNoLicense,
     };
 
     // The provides message is used to notify the player about various
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index b44d5cc..9b03b71 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -813,7 +813,10 @@
 
     for (OMX_U32 i = cancelStart; i < cancelEnd; i++) {
         BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
-        cancelBufferToNativeWindow(info);
+        status_t error = cancelBufferToNativeWindow(info);
+        if (err == 0) {
+            err = error;
+        }
     }
 
     return err;
@@ -888,11 +891,12 @@
     int err = mNativeWindow->cancelBuffer(
         mNativeWindow.get(), info->mGraphicBuffer.get(), -1);
 
-    CHECK_EQ(err, 0);
+    ALOGW_IF(err != 0, "[%s] can not return buffer %u to native window",
+            mComponentName.c_str(), info->mBufferID);
 
     info->mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
 
-    return OK;
+    return err;
 }
 
 ACodec::BufferInfo *ACodec::dequeueBufferFromNativeWindow() {
@@ -992,7 +996,7 @@
 
     if (portIndex == kPortIndexOutput && mNativeWindow != NULL
             && info->mStatus == BufferInfo::OWNED_BY_US) {
-        CHECK_EQ((status_t)OK, cancelBufferToNativeWindow(info));
+        cancelBufferToNativeWindow(info);
     }
 
     CHECK_EQ(mOMX->freeBuffer(
@@ -1241,13 +1245,13 @@
             tunneled != 0) {
             ALOGI("Configuring TUNNELED video playback.");
 
-            int64_t audioHwSync = 0;
-            if (!msg->findInt64("audio-hw-sync", &audioHwSync)) {
+            int32_t audioHwSync = 0;
+            if (!msg->findInt32("audio-hw-sync", &audioHwSync)) {
                 ALOGW("No Audio HW Sync provided for video tunnel");
             }
             err = configureTunneledVideoPlayback(audioHwSync, nativeWindow);
             if (err != OK) {
-                ALOGE("configureTunneledVideoPlayback(%" PRId64 ",%p) failed!",
+                ALOGE("configureTunneledVideoPlayback(%d,%p) failed!",
                         audioHwSync, nativeWindow.get());
                 return err;
             }
@@ -1894,7 +1898,7 @@
 }
 
 status_t ACodec::configureTunneledVideoPlayback(
-        int64_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
+        int32_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
     native_handle_t* sidebandHandle;
 
     status_t err = mOMX->configureVideoTunnelMode(
@@ -2309,7 +2313,6 @@
         return 0;
     }
     OMX_U32 ret = frameRate * iFramesInterval;
-    CHECK(ret > 1);
     return ret;
 }
 
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index be9af5e..193f8a7 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -62,6 +62,7 @@
         avc_utils.cpp                     \
 
 LOCAL_C_INCLUDES:= \
+        $(TOP)/frameworks/av/include/media/ \
         $(TOP)/frameworks/av/include/media/stagefright/timedtext \
         $(TOP)/frameworks/native/include/media/hardware \
         $(TOP)/frameworks/native/include/media/openmax \
@@ -70,6 +71,8 @@
         $(TOP)/external/openssl/include \
         $(TOP)/external/libvpx/libwebm \
         $(TOP)/system/netd/include \
+        $(TOP)/external/icu/icu4c/source/common \
+        $(TOP)/external/icu/icu4c/source/i18n \
 
 LOCAL_SHARED_LIBRARIES := \
         libbinder \
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 008da5a..9d6fd78 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -186,9 +186,9 @@
         const sp<IMediaHTTPService> &httpService,
         const char *uri,
         const KeyedVector<String8, String8> *headers,
-        AString *sniffedMIME) {
-    if (sniffedMIME != NULL) {
-        *sniffedMIME = "";
+        String8 *contentType) {
+    if (contentType != NULL) {
+        *contentType = "";
     }
 
     bool isWidevine = !strncasecmp("widevine://", uri, 11);
@@ -226,77 +226,14 @@
         }
 
         if (!isWidevine) {
-            String8 contentType = httpSource->getMIMEType();
+            if (contentType != NULL) {
+                *contentType = httpSource->getMIMEType();
+            }
 
-            sp<NuCachedSource2> cachedSource = new NuCachedSource2(
+            source = new NuCachedSource2(
                     httpSource,
                     cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
                     disconnectAtHighwatermark);
-
-            if (strncasecmp(contentType.string(), "audio/", 6)) {
-                // We're not doing this for streams that appear to be audio-only
-                // streams to ensure that even low bandwidth streams start
-                // playing back fairly instantly.
-
-                // We're going to prefill the cache before trying to instantiate
-                // the extractor below, as the latter is an operation that otherwise
-                // could block on the datasource for a significant amount of time.
-                // During that time we'd be unable to abort the preparation phase
-                // without this prefill.
-
-                // Initially make sure we have at least 192 KB for the sniff
-                // to complete without blocking.
-                static const size_t kMinBytesForSniffing = 192 * 1024;
-
-                off64_t metaDataSize = -1ll;
-                for (;;) {
-                    status_t finalStatus;
-                    size_t cachedDataRemaining =
-                            cachedSource->approxDataRemaining(&finalStatus);
-
-                    if (finalStatus != OK || (metaDataSize >= 0
-                            && (off64_t)cachedDataRemaining >= metaDataSize)) {
-                        ALOGV("stop caching, status %d, "
-                                "metaDataSize %lld, cachedDataRemaining %zu",
-                                finalStatus, metaDataSize, cachedDataRemaining);
-                        break;
-                    }
-
-                    ALOGV("now cached %zu bytes of data", cachedDataRemaining);
-
-                    if (metaDataSize < 0
-                            && cachedDataRemaining >= kMinBytesForSniffing) {
-                        String8 tmp;
-                        float confidence;
-                        sp<AMessage> meta;
-                        if (!cachedSource->sniff(&tmp, &confidence, &meta)) {
-                            return NULL;
-                        }
-
-                        // We successfully identified the file's extractor to
-                        // be, remember this mime type so we don't have to
-                        // sniff it again when we call MediaExtractor::Create()
-                        if (sniffedMIME != NULL) {
-                            *sniffedMIME = tmp.string();
-                        }
-
-                        if (meta == NULL
-                                || !meta->findInt64("meta-data-size",
-                                     reinterpret_cast<int64_t*>(&metaDataSize))) {
-                            metaDataSize = kDefaultMetaSize;
-                        }
-
-                        if (metaDataSize < 0ll) {
-                            ALOGE("invalid metaDataSize = %lld bytes", metaDataSize);
-                            return NULL;
-                        }
-                    }
-
-                    usleep(200000);
-                }
-            }
-
-            source = cachedSource;
         } else {
             // We do not want that prefetching, caching, datasource wrapper
             // in the widevine:// case.
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 0064293..1729f93 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -2810,7 +2810,6 @@
 
     {
         if (objectType == AOT_SBR || objectType == AOT_PS) {
-            const int32_t extensionSamplingFrequency = br.getBits(4);
             objectType = br.getBits(5);
 
             if (objectType == AOT_ESCAPE) {
@@ -2828,9 +2827,30 @@
                 const int32_t coreCoderDelay = br.getBits(14);
             }
 
-            const int32_t extensionFlag = br.getBits(1);
+            int32_t extensionFlag = -1;
+            if (br.numBitsLeft() > 0) {
+                extensionFlag = br.getBits(1);
+            } else {
+                switch (objectType) {
+                // 14496-3 4.5.1.1 extensionFlag
+                case AOT_AAC_LC:
+                    extensionFlag = 0;
+                    break;
+                case AOT_ER_AAC_LC:
+                case AOT_ER_AAC_SCAL:
+                case AOT_ER_BSAC:
+                case AOT_ER_AAC_LD:
+                    extensionFlag = 1;
+                    break;
+                default:
+                    TRESPASS();
+                    break;
+                }
+                ALOGW("csd missing extension flag; assuming %d for object type %u.",
+                        extensionFlag, objectType);
+            }
 
-            if (numChannels == 0 ) {
+            if (numChannels == 0) {
                 int32_t channelsEffectiveNum = 0;
                 int32_t channelsNum = 0;
                 const int32_t ElementInstanceTag = br.getBits(4);
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 7bb7ed9..fc2dd30 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -716,12 +716,13 @@
                     CHECK(msg->findInt32("err", &err));
                     CHECK(msg->findInt32("actionCode", &actionCode));
 
-                    ALOGE("Codec reported err %#x, actionCode %d", err, actionCode);
+                    ALOGE("Codec reported err %#x, actionCode %d, while in state %d",
+                            err, actionCode, mState);
                     if (err == DEAD_OBJECT) {
                         mFlags |= kFlagSawMediaServerDie;
                     }
 
-                    bool sendErrorReponse = true;
+                    bool sendErrorResponse = true;
 
                     switch (mState) {
                         case INITIALIZING:
@@ -732,13 +733,15 @@
 
                         case CONFIGURING:
                         {
-                            setState(INITIALIZED);
+                            setState(actionCode == ACTION_CODE_FATAL ?
+                                    UNINITIALIZED : INITIALIZED);
                             break;
                         }
 
                         case STARTING:
                         {
-                            setState(CONFIGURED);
+                            setState(actionCode == ACTION_CODE_FATAL ?
+                                    UNINITIALIZED : CONFIGURED);
                             break;
                         }
 
@@ -748,7 +751,7 @@
                             // Ignore the error, assuming we'll still get
                             // the shutdown complete notification.
 
-                            sendErrorReponse = false;
+                            sendErrorResponse = false;
 
                             if (mFlags & kFlagSawMediaServerDie) {
                                 // MediaServer died, there definitely won't
@@ -767,15 +770,19 @@
 
                         case FLUSHING:
                         {
-                            setState(
-                                    (mFlags & kFlagIsAsync) ? FLUSHED : STARTED);
+                            if (actionCode == ACTION_CODE_FATAL) {
+                                setState(UNINITIALIZED);
+                            } else {
+                                setState(
+                                        (mFlags & kFlagIsAsync) ? FLUSHED : STARTED);
+                            }
                             break;
                         }
 
                         case FLUSHED:
                         case STARTED:
                         {
-                            sendErrorReponse = false;
+                            sendErrorResponse = false;
 
                             setStickyError(err);
                             postActivityNotificationIfPossible();
@@ -800,7 +807,7 @@
 
                         default:
                         {
-                            sendErrorReponse = false;
+                            sendErrorResponse = false;
 
                             setStickyError(err);
                             postActivityNotificationIfPossible();
@@ -826,7 +833,7 @@
                         }
                     }
 
-                    if (sendErrorReponse) {
+                    if (sendErrorResponse) {
                         PostReplyWithError(mReplyID, err);
                     }
                     break;
@@ -1113,7 +1120,11 @@
 
                 case CodecBase::kWhatFlushCompleted:
                 {
-                    CHECK_EQ(mState, FLUSHING);
+                    if (mState != FLUSHING) {
+                        ALOGW("received FlushCompleted message in state %d",
+                                mState);
+                        break;
+                    }
 
                     if (mFlags & kFlagIsAsync) {
                         setState(FLUSHED);
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 3d1d40e..a8806c8 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -994,7 +994,6 @@
         return 0;
     }
     OMX_U32 ret = frameRate * iFramesInterval - 1;
-    CHECK(ret > 1);
     return ret;
 }
 
@@ -2500,12 +2499,6 @@
                        data1, data2);
 
             if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
-                // There is no need to check whether mFilledBuffers is empty or not
-                // when the OMX_EventPortSettingsChanged is not meant for reallocating
-                // the output buffers.
-                if (data1 == kPortIndexOutput) {
-                    CHECK(mFilledBuffers.empty());
-                }
                 onPortSettingsChanged(data1);
             } else if (data1 == kPortIndexOutput &&
                         (data2 == OMX_IndexConfigCommonOutputCrop ||
@@ -2899,7 +2892,7 @@
 void OMXCodec::onPortSettingsChanged(OMX_U32 portIndex) {
     CODEC_LOGV("PORT_SETTINGS_CHANGED(%ld)", portIndex);
 
-    CHECK_EQ((int)mState, (int)EXECUTING);
+    CHECK(mState == EXECUTING || mState == EXECUTING_TO_IDLE);
     CHECK_EQ(portIndex, (OMX_U32)kPortIndexOutput);
     CHECK(!mOutputPortSettingsChangedPending);
 
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 8cc41e7..101fc8a 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -32,6 +32,7 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXCodec.h>
 #include <media/stagefright/MediaDefs.h>
+#include <CharacterEncodingDetector.h>
 
 namespace android {
 
@@ -450,33 +451,59 @@
     struct Map {
         int from;
         int to;
+        const char *name;
     };
     static const Map kMap[] = {
-        { kKeyMIMEType, METADATA_KEY_MIMETYPE },
-        { kKeyCDTrackNumber, METADATA_KEY_CD_TRACK_NUMBER },
-        { kKeyDiscNumber, METADATA_KEY_DISC_NUMBER },
-        { kKeyAlbum, METADATA_KEY_ALBUM },
-        { kKeyArtist, METADATA_KEY_ARTIST },
-        { kKeyAlbumArtist, METADATA_KEY_ALBUMARTIST },
-        { kKeyAuthor, METADATA_KEY_AUTHOR },
-        { kKeyComposer, METADATA_KEY_COMPOSER },
-        { kKeyDate, METADATA_KEY_DATE },
-        { kKeyGenre, METADATA_KEY_GENRE },
-        { kKeyTitle, METADATA_KEY_TITLE },
-        { kKeyYear, METADATA_KEY_YEAR },
-        { kKeyWriter, METADATA_KEY_WRITER },
-        { kKeyCompilation, METADATA_KEY_COMPILATION },
-        { kKeyLocation, METADATA_KEY_LOCATION },
+        { kKeyMIMEType, METADATA_KEY_MIMETYPE, NULL },
+        { kKeyCDTrackNumber, METADATA_KEY_CD_TRACK_NUMBER, "tracknumber" },
+        { kKeyDiscNumber, METADATA_KEY_DISC_NUMBER, "discnumber" },
+        { kKeyAlbum, METADATA_KEY_ALBUM, "album" },
+        { kKeyArtist, METADATA_KEY_ARTIST, "artist" },
+        { kKeyAlbumArtist, METADATA_KEY_ALBUMARTIST, "albumartist" },
+        { kKeyAuthor, METADATA_KEY_AUTHOR, NULL },
+        { kKeyComposer, METADATA_KEY_COMPOSER, "composer" },
+        { kKeyDate, METADATA_KEY_DATE, NULL },
+        { kKeyGenre, METADATA_KEY_GENRE, "genre" },
+        { kKeyTitle, METADATA_KEY_TITLE, "title" },
+        { kKeyYear, METADATA_KEY_YEAR, "year" },
+        { kKeyWriter, METADATA_KEY_WRITER, "writer" },
+        { kKeyCompilation, METADATA_KEY_COMPILATION, "compilation" },
+        { kKeyLocation, METADATA_KEY_LOCATION, NULL },
     };
+
     static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
 
+    CharacterEncodingDetector *detector = new CharacterEncodingDetector();
+
     for (size_t i = 0; i < kNumMapEntries; ++i) {
         const char *value;
         if (meta->findCString(kMap[i].from, &value)) {
-            mMetaData.add(kMap[i].to, String8(value));
+            if (kMap[i].name) {
+                // add to charset detector
+                detector->addTag(kMap[i].name, value);
+            } else {
+                // directly add to output list
+                mMetaData.add(kMap[i].to, String8(value));
+            }
         }
     }
 
+    detector->detectAndConvert();
+    int size = detector->size();
+    if (size) {
+        for (int i = 0; i < size; i++) {
+            const char *name;
+            const char *value;
+            detector->getTag(i, &name, &value);
+            for (size_t j = 0; j < kNumMapEntries; ++j) {
+                if (kMap[j].name && !strcmp(kMap[j].name, name)) {
+                    mMetaData.add(kMap[j].to, String8(value));
+                }
+            }
+        }
+    }
+    delete detector;
+
     const void *data;
     uint32_t type;
     size_t dataSize;
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 09c6e69..8b4dd6f 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -493,7 +493,8 @@
     return mOutputDelayRingBufferSize - outputDelayRingBufferSamplesAvailable();
 }
 
-void SoftAAC2::onQueueFilled(OMX_U32 portIndex) {
+
+void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) {
     if (mSignalledError || mOutputPortSettingsChange != NONE) {
         return;
     }
@@ -505,59 +506,54 @@
     List<BufferInfo *> &inQueue = getPortQueue(0);
     List<BufferInfo *> &outQueue = getPortQueue(1);
 
-    if (portIndex == 0 && mInputBufferCount == 0) {
-        BufferInfo *inInfo = *inQueue.begin();
-        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
-
-        inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
-        inBufferLength[0] = inHeader->nFilledLen;
-
-        AAC_DECODER_ERROR decoderErr =
-            aacDecoder_ConfigRaw(mAACDecoder,
-                                 inBuffer,
-                                 inBufferLength);
-
-        if (decoderErr != AAC_DEC_OK) {
-            ALOGW("aacDecoder_ConfigRaw decoderErr = 0x%4.4x", decoderErr);
-            mSignalledError = true;
-            notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
-            return;
-        }
-
-        mInputBufferCount++;
-        mOutputBufferCount++; // fake increase of outputBufferCount to keep the counters aligned
-
-        inInfo->mOwnedByUs = false;
-        inQueue.erase(inQueue.begin());
-        inInfo = NULL;
-        notifyEmptyBufferDone(inHeader);
-        inHeader = NULL;
-
-        configureDownmix();
-        // Only send out port settings changed event if both sample rate
-        // and numChannels are valid.
-        if (mStreamInfo->sampleRate && mStreamInfo->numChannels) {
-            ALOGI("Initially configuring decoder: %d Hz, %d channels",
-                mStreamInfo->sampleRate,
-                mStreamInfo->numChannels);
-
-            notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
-            mOutputPortSettingsChange = AWAITING_DISABLED;
-        }
-
-        return;
-    }
-
     while ((!inQueue.empty() || mEndOfInput) && !outQueue.empty()) {
         if (!inQueue.empty()) {
             INT_PCM tmpOutBuffer[2048 * MAX_CHANNEL_COUNT];
             BufferInfo *inInfo = *inQueue.begin();
             OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
 
-            if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
-                mEndOfInput = true;
-            } else {
-                mEndOfInput = false;
+            mEndOfInput = (inHeader->nFlags & OMX_BUFFERFLAG_EOS) != 0;
+            if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) != 0) {
+                BufferInfo *inInfo = *inQueue.begin();
+                OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+                inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
+                inBufferLength[0] = inHeader->nFilledLen;
+
+                AAC_DECODER_ERROR decoderErr =
+                    aacDecoder_ConfigRaw(mAACDecoder,
+                                         inBuffer,
+                                         inBufferLength);
+
+                if (decoderErr != AAC_DEC_OK) {
+                    ALOGW("aacDecoder_ConfigRaw decoderErr = 0x%4.4x", decoderErr);
+                    mSignalledError = true;
+                    notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+                    return;
+                }
+
+                mInputBufferCount++;
+                mOutputBufferCount++; // fake increase of outputBufferCount to keep the counters aligned
+
+                inInfo->mOwnedByUs = false;
+                inQueue.erase(inQueue.begin());
+                mLastInHeader = NULL;
+                inInfo = NULL;
+                notifyEmptyBufferDone(inHeader);
+                inHeader = NULL;
+
+                configureDownmix();
+                // Only send out port settings changed event if both sample rate
+                // and numChannels are valid.
+                if (mStreamInfo->sampleRate && mStreamInfo->numChannels) {
+                    ALOGI("Initially configuring decoder: %d Hz, %d channels",
+                        mStreamInfo->sampleRate,
+                        mStreamInfo->numChannels);
+
+                    notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+                    mOutputPortSettingsChange = AWAITING_DISABLED;
+                }
+                return;
             }
 
             if (inHeader->nFilledLen == 0) {
@@ -567,206 +563,193 @@
                 inInfo = NULL;
                 notifyEmptyBufferDone(inHeader);
                 inHeader = NULL;
-            } else {
-                if (mIsADTS) {
-                    size_t adtsHeaderSize = 0;
-                    // skip 30 bits, aac_frame_length follows.
-                    // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
+                continue;
+            }
 
-                    const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset;
+            if (mIsADTS) {
+                size_t adtsHeaderSize = 0;
+                // skip 30 bits, aac_frame_length follows.
+                // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
 
-                    bool signalError = false;
-                    if (inHeader->nFilledLen < 7) {
-                        ALOGE("Audio data too short to contain even the ADTS header. "
-                                "Got %d bytes.", inHeader->nFilledLen);
+                const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset;
+
+                bool signalError = false;
+                if (inHeader->nFilledLen < 7) {
+                    ALOGE("Audio data too short to contain even the ADTS header. "
+                            "Got %d bytes.", inHeader->nFilledLen);
+                    hexdump(adtsHeader, inHeader->nFilledLen);
+                    signalError = true;
+                } else {
+                    bool protectionAbsent = (adtsHeader[1] & 1);
+
+                    unsigned aac_frame_length =
+                        ((adtsHeader[3] & 3) << 11)
+                        | (adtsHeader[4] << 3)
+                        | (adtsHeader[5] >> 5);
+
+                    if (inHeader->nFilledLen < aac_frame_length) {
+                        ALOGE("Not enough audio data for the complete frame. "
+                                "Got %d bytes, frame size according to the ADTS "
+                                "header is %u bytes.",
+                                inHeader->nFilledLen, aac_frame_length);
                         hexdump(adtsHeader, inHeader->nFilledLen);
                         signalError = true;
                     } else {
-                        bool protectionAbsent = (adtsHeader[1] & 1);
+                        adtsHeaderSize = (protectionAbsent ? 7 : 9);
 
-                        unsigned aac_frame_length =
-                            ((adtsHeader[3] & 3) << 11)
-                            | (adtsHeader[4] << 3)
-                            | (adtsHeader[5] >> 5);
+                        inBuffer[0] = (UCHAR *)adtsHeader + adtsHeaderSize;
+                        inBufferLength[0] = aac_frame_length - adtsHeaderSize;
 
-                        if (inHeader->nFilledLen < aac_frame_length) {
-                            ALOGE("Not enough audio data for the complete frame. "
-                                    "Got %d bytes, frame size according to the ADTS "
-                                    "header is %u bytes.",
-                                    inHeader->nFilledLen, aac_frame_length);
-                            hexdump(adtsHeader, inHeader->nFilledLen);
-                            signalError = true;
-                        } else {
-                            adtsHeaderSize = (protectionAbsent ? 7 : 9);
-
-                            inBuffer[0] = (UCHAR *)adtsHeader + adtsHeaderSize;
-                            inBufferLength[0] = aac_frame_length - adtsHeaderSize;
-
-                            inHeader->nOffset += adtsHeaderSize;
-                            inHeader->nFilledLen -= adtsHeaderSize;
-                        }
-                    }
-
-                    if (signalError) {
-                        mSignalledError = true;
-
-                        notify(OMX_EventError,
-                               OMX_ErrorStreamCorrupt,
-                               ERROR_MALFORMED,
-                               NULL);
-
-                        return;
-                    }
-                } else {
-                    inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
-                    inBufferLength[0] = inHeader->nFilledLen;
-                }
-
-                // Fill and decode
-                bytesValid[0] = inBufferLength[0];
-
-                INT prevSampleRate = mStreamInfo->sampleRate;
-                INT prevNumChannels = mStreamInfo->numChannels;
-
-                if (inHeader != mLastInHeader) {
-                    mLastInHeader = inHeader;
-                    mCurrentInputTime = inHeader->nTimeStamp;
-                } else {
-                    if (mStreamInfo->sampleRate) {
-                        mCurrentInputTime += mStreamInfo->aacSamplesPerFrame *
-                                1000000ll / mStreamInfo->sampleRate;
-                    } else {
-                        ALOGW("no sample rate yet");
+                        inHeader->nOffset += adtsHeaderSize;
+                        inHeader->nFilledLen -= adtsHeaderSize;
                     }
                 }
-                mAnchorTimes.add(mCurrentInputTime);
-                aacDecoder_Fill(mAACDecoder,
-                                inBuffer,
-                                inBufferLength,
-                                bytesValid);
 
-                 // run DRC check
-                 mDrcWrap.submitStreamData(mStreamInfo);
-                 mDrcWrap.update();
-
-                AAC_DECODER_ERROR decoderErr =
-                    aacDecoder_DecodeFrame(mAACDecoder,
-                                           tmpOutBuffer,
-                                           2048 * MAX_CHANNEL_COUNT,
-                                           0 /* flags */);
-
-                if (decoderErr != AAC_DEC_OK) {
-                    ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr);
-                }
-
-                if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
-                    ALOGE("AAC_DEC_NOT_ENOUGH_BITS should never happen");
+                if (signalError) {
                     mSignalledError = true;
-                    notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+                    notify(OMX_EventError, OMX_ErrorStreamCorrupt, ERROR_MALFORMED, NULL);
                     return;
                 }
+            } else {
+                inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
+                inBufferLength[0] = inHeader->nFilledLen;
+            }
 
-                if (bytesValid[0] != 0) {
-                    ALOGE("bytesValid[0] != 0 should never happen");
-                    mSignalledError = true;
-                    notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
-                    return;
-                }
+            // Fill and decode
+            bytesValid[0] = inBufferLength[0];
 
-                size_t numOutBytes =
-                    mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;
+            INT prevSampleRate = mStreamInfo->sampleRate;
+            INT prevNumChannels = mStreamInfo->numChannels;
 
-                if (decoderErr == AAC_DEC_OK) {
-                    if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
-                            mStreamInfo->frameSize * mStreamInfo->numChannels)) {
-                        mSignalledError = true;
-                        notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
-                        return;
-                    }
-                    UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
-                    inHeader->nFilledLen -= inBufferUsedLength;
-                    inHeader->nOffset += inBufferUsedLength;
+            if (inHeader != mLastInHeader) {
+                mLastInHeader = inHeader;
+                mCurrentInputTime = inHeader->nTimeStamp;
+            } else {
+                if (mStreamInfo->sampleRate) {
+                    mCurrentInputTime += mStreamInfo->aacSamplesPerFrame *
+                            1000000ll / mStreamInfo->sampleRate;
                 } else {
-                    ALOGW("AAC decoder returned error 0x%4.4x, substituting silence", decoderErr);
-
-                    memset(tmpOutBuffer, 0, numOutBytes); // TODO: check for overflow
-
-                    if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
-                            mStreamInfo->frameSize * mStreamInfo->numChannels)) {
-                        mSignalledError = true;
-                        notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
-                        return;
-                    }
-
-                    // Discard input buffer.
-                    inHeader->nFilledLen = 0;
-
-                    aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
-
-                    // fall through
+                    ALOGW("no sample rate yet");
                 }
+            }
+            mAnchorTimes.add(mCurrentInputTime);
+            aacDecoder_Fill(mAACDecoder,
+                            inBuffer,
+                            inBufferLength,
+                            bytesValid);
 
-                /*
-                 * AAC+/eAAC+ streams can be signalled in two ways: either explicitly
-                 * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual
-                 * rate system and the sampling rate in the final output is actually
-                 * doubled compared with the core AAC decoder sampling rate.
-                 *
-                 * Explicit signalling is done by explicitly defining SBR audio object
-                 * type in the bitstream. Implicit signalling is done by embedding
-                 * SBR content in AAC extension payload specific to SBR, and hence
-                 * requires an AAC decoder to perform pre-checks on actual audio frames.
-                 *
-                 * Thus, we could not say for sure whether a stream is
-                 * AAC+/eAAC+ until the first data frame is decoded.
-                 */
-                if (mOutputBufferCount > 1) {
-                    if (mStreamInfo->sampleRate != prevSampleRate ||
-                        mStreamInfo->numChannels != prevNumChannels) {
-                        ALOGE("can not reconfigure AAC output");
-                        mSignalledError = true;
-                        notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
-                        return;
-                    }
-                }
-                if (mInputBufferCount <= 2) { // TODO: <= 1
-                    if (mStreamInfo->sampleRate != prevSampleRate ||
-                        mStreamInfo->numChannels != prevNumChannels) {
-                        ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels",
-                              prevSampleRate, mStreamInfo->sampleRate,
-                              prevNumChannels, mStreamInfo->numChannels);
+             // run DRC check
+             mDrcWrap.submitStreamData(mStreamInfo);
+             mDrcWrap.update();
 
-                        notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
-                        mOutputPortSettingsChange = AWAITING_DISABLED;
+            AAC_DECODER_ERROR decoderErr =
+                aacDecoder_DecodeFrame(mAACDecoder,
+                                       tmpOutBuffer,
+                                       2048 * MAX_CHANNEL_COUNT,
+                                       0 /* flags */);
 
-                        if (inHeader->nFilledLen == 0) {
-                            inInfo->mOwnedByUs = false;
-                            mInputBufferCount++;
-                            inQueue.erase(inQueue.begin());
-                            mLastInHeader = NULL;
-                            inInfo = NULL;
-                            notifyEmptyBufferDone(inHeader);
-                            inHeader = NULL;
-                        }
-                        return;
-                    }
-                } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) {
-                    ALOGW("Invalid AAC stream");
+            if (decoderErr != AAC_DEC_OK) {
+                ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr);
+            }
+
+            if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
+                ALOGE("AAC_DEC_NOT_ENOUGH_BITS should never happen");
+                mSignalledError = true;
+                notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+                return;
+            }
+
+            if (bytesValid[0] != 0) {
+                ALOGE("bytesValid[0] != 0 should never happen");
+                mSignalledError = true;
+                notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+                return;
+            }
+
+            size_t numOutBytes =
+                mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;
+
+            if (decoderErr == AAC_DEC_OK) {
+                if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
+                        mStreamInfo->frameSize * mStreamInfo->numChannels)) {
                     mSignalledError = true;
                     notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
                     return;
                 }
-                if (inHeader->nFilledLen == 0) {
-                    inInfo->mOwnedByUs = false;
-                    mInputBufferCount++;
-                    inQueue.erase(inQueue.begin());
-                    mLastInHeader = NULL;
-                    inInfo = NULL;
-                    notifyEmptyBufferDone(inHeader);
-                    inHeader = NULL;
-                } else {
-                    ALOGV("inHeader->nFilledLen = %d", inHeader->nFilledLen);
+                UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
+                inHeader->nFilledLen -= inBufferUsedLength;
+                inHeader->nOffset += inBufferUsedLength;
+            } else {
+                ALOGW("AAC decoder returned error 0x%4.4x, substituting silence", decoderErr);
+
+                memset(tmpOutBuffer, 0, numOutBytes); // TODO: check for overflow
+
+                if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
+                        mStreamInfo->frameSize * mStreamInfo->numChannels)) {
+                    mSignalledError = true;
+                    notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+                    return;
                 }
+
+                // Discard input buffer.
+                inHeader->nFilledLen = 0;
+
+                aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
+
+                // fall through
+            }
+
+            /*
+             * AAC+/eAAC+ streams can be signalled in two ways: either explicitly
+             * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual
+             * rate system and the sampling rate in the final output is actually
+             * doubled compared with the core AAC decoder sampling rate.
+             *
+             * Explicit signalling is done by explicitly defining SBR audio object
+             * type in the bitstream. Implicit signalling is done by embedding
+             * SBR content in AAC extension payload specific to SBR, and hence
+             * requires an AAC decoder to perform pre-checks on actual audio frames.
+             *
+             * Thus, we could not say for sure whether a stream is
+             * AAC+/eAAC+ until the first data frame is decoded.
+             */
+            if (mInputBufferCount <= 2 || mOutputBufferCount > 1) { // TODO: <= 1
+                if (mStreamInfo->sampleRate != prevSampleRate ||
+                    mStreamInfo->numChannels != prevNumChannels) {
+                    ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels",
+                          prevSampleRate, mStreamInfo->sampleRate,
+                          prevNumChannels, mStreamInfo->numChannels);
+
+                    notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+                    mOutputPortSettingsChange = AWAITING_DISABLED;
+
+                    if (inHeader->nFilledLen == 0) {
+                        inInfo->mOwnedByUs = false;
+                        mInputBufferCount++;
+                        inQueue.erase(inQueue.begin());
+                        mLastInHeader = NULL;
+                        inInfo = NULL;
+                        notifyEmptyBufferDone(inHeader);
+                        inHeader = NULL;
+                    }
+                    return;
+                }
+            } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) {
+                ALOGW("Invalid AAC stream");
+                mSignalledError = true;
+                notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+                return;
+            }
+            if (inHeader->nFilledLen == 0) {
+                inInfo->mOwnedByUs = false;
+                mInputBufferCount++;
+                inQueue.erase(inQueue.begin());
+                mLastInHeader = NULL;
+                inInfo = NULL;
+                notifyEmptyBufferDone(inHeader);
+                inHeader = NULL;
+            } else {
+                ALOGV("inHeader->nFilledLen = %d", inHeader->nFilledLen);
             }
         }
 
diff --git a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h
index 23d5ff1..cfa9ca5 100644
--- a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h
+++ b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h
@@ -67,10 +67,6 @@
         kNumBuffers = 2,
     };
 
-    enum {
-        kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1
-    };
-
     // OMX input buffer's timestamp and flags
     typedef struct {
         int64_t mTimeUs;
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
index cc4ea8f..c59a1b9 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
@@ -56,10 +56,6 @@
         kNumBuffers = 2,
     };
 
-    enum {
-        kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1
-    };
-
     // OMX input buffer's timestamp and flags
     typedef struct {
         int64_t mTimeUs;
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 423a057..2f63bdd 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -23,9 +23,6 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaDefs.h>
 
-#include "vpx/vpx_decoder.h"
-#include "vpx/vpx_codec.h"
-#include "vpx/vp8dx.h"
 
 namespace android {
 
@@ -41,7 +38,8 @@
             NULL /* profileLevels */, 0 /* numProfileLevels */,
             320 /* width */, 240 /* height */, callbacks, appData, component),
       mMode(codingType == OMX_VIDEO_CodingVP8 ? MODE_VP8 : MODE_VP9),
-      mCtx(NULL) {
+      mCtx(NULL),
+      mImg(NULL) {
     initPorts(kNumBuffers, 768 * 1024 /* inputBufferSize */,
             kNumBuffers,
             codingType == OMX_VIDEO_CodingVP8 ? MEDIA_MIMETYPE_VIDEO_VP8 : MEDIA_MIMETYPE_VIDEO_VP9);
@@ -118,36 +116,50 @@
             }
         }
 
-        if (vpx_codec_decode(
-                    (vpx_codec_ctx_t *)mCtx,
-                    inHeader->pBuffer + inHeader->nOffset,
-                    inHeader->nFilledLen,
-                    NULL,
-                    0)) {
-            ALOGE("on2 decoder failed to decode frame.");
+        if (mImg == NULL) {
+            if (vpx_codec_decode(
+                        (vpx_codec_ctx_t *)mCtx,
+                        inHeader->pBuffer + inHeader->nOffset,
+                        inHeader->nFilledLen,
+                        NULL,
+                        0)) {
+                ALOGE("on2 decoder failed to decode frame.");
 
-            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
-            return;
+                notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+                return;
+            }
+            vpx_codec_iter_t iter = NULL;
+            mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
         }
 
-        vpx_codec_iter_t iter = NULL;
-        vpx_image_t *img = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
+        if (mImg != NULL) {
+            CHECK_EQ(mImg->fmt, IMG_FMT_I420);
 
-        if (img != NULL) {
-            CHECK_EQ(img->fmt, IMG_FMT_I420);
-
-            uint32_t width = img->d_w;
-            uint32_t height = img->d_h;
+            uint32_t width = mImg->d_w;
+            uint32_t height = mImg->d_h;
 
             if (width != mWidth || height != mHeight) {
                 mWidth = width;
                 mHeight = height;
 
-                updatePortDefinitions();
-
-                notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
-                mOutputPortSettingsChange = AWAITING_DISABLED;
-                return;
+                if (!mIsAdaptive || width > mAdaptiveMaxWidth || height > mAdaptiveMaxHeight) {
+                    if (mIsAdaptive) {
+                        if (width > mAdaptiveMaxWidth) {
+                            mAdaptiveMaxWidth = width;
+                        }
+                        if (height > mAdaptiveMaxHeight) {
+                            mAdaptiveMaxHeight = height;
+                        }
+                    }
+                    updatePortDefinitions();
+                    notify(OMX_EventPortSettingsChanged, kOutputPortIndex, 0, NULL);
+                    mOutputPortSettingsChange = AWAITING_DISABLED;
+                    return;
+                } else {
+                    updatePortDefinitions();
+                    notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
+                           OMX_IndexConfigCommonOutputCrop, NULL);
+                }
             }
 
             outHeader->nOffset = 0;
@@ -155,31 +167,38 @@
             outHeader->nFlags = EOSseen ? OMX_BUFFERFLAG_EOS : 0;
             outHeader->nTimeStamp = inHeader->nTimeStamp;
 
-            const uint8_t *srcLine = (const uint8_t *)img->planes[PLANE_Y];
+            uint32_t buffer_stride = mIsAdaptive ? mAdaptiveMaxWidth : mWidth;
+            uint32_t buffer_height = mIsAdaptive ? mAdaptiveMaxHeight : mHeight;
+
+            const uint8_t *srcLine = (const uint8_t *)mImg->planes[PLANE_Y];
             uint8_t *dst = outHeader->pBuffer;
-            for (size_t i = 0; i < img->d_h; ++i) {
-                memcpy(dst, srcLine, img->d_w);
-
-                srcLine += img->stride[PLANE_Y];
-                dst += img->d_w;
+            for (size_t i = 0; i < buffer_height; ++i) {
+                if (i < mImg->d_h) {
+                    memcpy(dst, srcLine, mImg->d_w);
+                    srcLine += mImg->stride[PLANE_Y];
+                }
+                dst += buffer_stride;
             }
 
-            srcLine = (const uint8_t *)img->planes[PLANE_U];
-            for (size_t i = 0; i < img->d_h / 2; ++i) {
-                memcpy(dst, srcLine, img->d_w / 2);
-
-                srcLine += img->stride[PLANE_U];
-                dst += img->d_w / 2;
+            srcLine = (const uint8_t *)mImg->planes[PLANE_U];
+            for (size_t i = 0; i < buffer_height / 2; ++i) {
+                if (i < mImg->d_h / 2) {
+                    memcpy(dst, srcLine, mImg->d_w / 2);
+                    srcLine += mImg->stride[PLANE_U];
+                }
+                dst += buffer_stride / 2;
             }
 
-            srcLine = (const uint8_t *)img->planes[PLANE_V];
-            for (size_t i = 0; i < img->d_h / 2; ++i) {
-                memcpy(dst, srcLine, img->d_w / 2);
-
-                srcLine += img->stride[PLANE_V];
-                dst += img->d_w / 2;
+            srcLine = (const uint8_t *)mImg->planes[PLANE_V];
+            for (size_t i = 0; i < buffer_height / 2; ++i) {
+                if (i < mImg->d_h / 2) {
+                    memcpy(dst, srcLine, mImg->d_w / 2);
+                    srcLine += mImg->stride[PLANE_V];
+                }
+                dst += buffer_stride / 2;
             }
 
+            mImg = NULL;
             outInfo->mOwnedByUs = false;
             outQueue.erase(outQueue.begin());
             outInfo = NULL;
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index cd5eb28..8f68693 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -20,6 +20,10 @@
 
 #include "SoftVideoDecoderOMXComponent.h"
 
+#include "vpx/vpx_decoder.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8dx.h"
+
 namespace android {
 
 struct SoftVPX : public SoftVideoDecoderOMXComponent {
@@ -47,6 +51,8 @@
 
     void *mCtx;
 
+    vpx_image_t *mImg;
+
     status_t initDecoder();
 
     DISALLOW_EVIL_CONSTRUCTORS(SoftVPX);
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index c5a83d1..5b4c954 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -91,10 +91,6 @@
             const char *name, OMX_INDEXTYPE *index);
 
 private:
-    enum {
-        kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1,
-    };
-
     enum TemporalReferences {
         // For 1 layer case: reference all (last, golden, and alt ref), but only
         // update last.
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index 9b930bc..c97be28 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -16,18 +16,89 @@
 
 <Included>
     <Decoders>
-        <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es" />
-        <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp" />
-        <MediaCodec name="OMX.google.h264.decoder" type="video/avc" />
-        <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc" />
-        <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8" />
-        <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9" />
+        <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es">
+            <!-- profiles and levels:  ProfileSimple : Level3 -->
+            <Limit name="size" min="2x2" max="352x288" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="12-11880" />
+            <Limit name="bitrate" range="1-384000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp">
+            <!-- profiles and levels:  ProfileBaseline : Level30, ProfileBaseline : Level45
+                    ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
+            <Limit name="size" min="2x2" max="352x288" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="bitrate" range="1-384000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.h264.decoder" type="video/avc">
+            <!-- profiles and levels:  ProfileBaseline : Level51 -->
+            <Limit name="size" min="2x2" max="2048x2048" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="1-983040" />
+            <Limit name="bitrate" range="1-40000000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc">
+            <!-- profiles and levels:  ProfileMain : MainTierLevel51 -->
+            <Limit name="size" min="2x2" max="2048x2048" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="8x8" />
+            <Limit name="block-count" range="1-139264" />
+            <Limit name="blocks-per-second" range="1-2000000" />
+            <Limit name="bitrate" range="1-10000000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8">
+            <Limit name="size" min="2x2" max="2048x2048" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="1-1000000" />
+            <Limit name="bitrate" range="1-40000000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9">
+            <Limit name="size" min="2x2" max="2048x2048" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="1-500000" />
+            <Limit name="bitrate" range="1-40000000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
     </Decoders>
 
     <Encoders>
-        <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp" />
-        <MediaCodec name="OMX.google.h264.encoder" type="video/avc" />
-        <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es" />
-        <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8" />
+        <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp">
+            <!-- profiles and levels:  ProfileBaseline : Level45 -->
+            <Limit name="size" min="2x2" max="176x144" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="bitrate" range="1-128000" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.h264.encoder" type="video/avc">
+            <!-- profiles and levels:  ProfileBaseline : Level2 -->
+            <Limit name="size" min="2x2" max="896x896" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="1-11880" />
+            <Limit name="bitrate" range="1-2000000" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es">
+            <!-- profiles and levels:  ProfileCore : Level2 -->
+            <Limit name="size" min="2x2" max="176x144" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="12-1485" />
+            <Limit name="bitrate" range="1-64000" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8">
+            <!-- profiles and levels:  ProfileMain : Level_Version0-3 -->
+            <Limit name="size" min="2x2" max="2048x2048" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="bitrate" range="1-40000000" />
+            <Feature name="bitrate-modes" value="VBR,CBR" />
+        </MediaCodec>
     </Encoders>
 </Included>
diff --git a/media/libstagefright/foundation/ALooper.cpp b/media/libstagefright/foundation/ALooper.cpp
index ebf9d8d..88b1c92 100644
--- a/media/libstagefright/foundation/ALooper.cpp
+++ b/media/libstagefright/foundation/ALooper.cpp
@@ -68,14 +68,14 @@
 
 ALooper::ALooper()
     : mRunningLocally(false) {
+    // clean up stale AHandlers. Doing it here instead of in the destructor avoids
+    // the side effect of objects being deleted from the unregister function recursively.
+    gLooperRoster.unregisterStaleHandlers();
 }
 
 ALooper::~ALooper() {
     stop();
-
-    // Since this looper is "dead" (or as good as dead by now),
-    // have ALooperRoster unregister any handlers still registered for it.
-    gLooperRoster.unregisterStaleHandlers();
+    // stale AHandlers are now cleaned up in the constructor of the next ALooper to come along
 }
 
 void ALooper::setName(const char *name) {
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index 0c181ff..e0dc768 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -72,50 +72,40 @@
 }
 
 void ALooperRoster::unregisterStaleHandlers() {
-    Mutex::Autolock autoLock(mLock);
 
-    for (size_t i = mHandlers.size(); i-- > 0;) {
-        const HandlerInfo &info = mHandlers.valueAt(i);
+    Vector<sp<ALooper> > activeLoopers;
+    {
+        Mutex::Autolock autoLock(mLock);
 
-        sp<ALooper> looper = info.mLooper.promote();
-        if (looper == NULL) {
-            ALOGV("Unregistering stale handler %d", mHandlers.keyAt(i));
-            mHandlers.removeItemsAt(i);
+        for (size_t i = mHandlers.size(); i-- > 0;) {
+            const HandlerInfo &info = mHandlers.valueAt(i);
+
+            sp<ALooper> looper = info.mLooper.promote();
+            if (looper == NULL) {
+                ALOGV("Unregistering stale handler %d", mHandlers.keyAt(i));
+                mHandlers.removeItemsAt(i);
+            } else {
+                // At this point 'looper' might be the only sp<> keeping
+                // the object alive. To prevent it from going out of scope
+                // and having ~ALooper call this method again recursively
+                // and then deadlocking because of the Autolock above, add
+                // it to a Vector which will go out of scope after the lock
+                // has been released.
+                activeLoopers.add(looper);
+            }
         }
     }
 }
 
 status_t ALooperRoster::postMessage(
         const sp<AMessage> &msg, int64_t delayUs) {
-    Mutex::Autolock autoLock(mLock);
-    return postMessage_l(msg, delayUs);
-}
 
-status_t ALooperRoster::postMessage_l(
-        const sp<AMessage> &msg, int64_t delayUs) {
-    ssize_t index = mHandlers.indexOfKey(msg->target());
-
-    if (index < 0) {
-        ALOGW("failed to post message '%s'. Target handler not registered.",
-              msg->debugString().c_str());
-        return -ENOENT;
-    }
-
-    const HandlerInfo &info = mHandlers.valueAt(index);
-
-    sp<ALooper> looper = info.mLooper.promote();
+    sp<ALooper> looper = findLooper(msg->target());
 
     if (looper == NULL) {
-        ALOGW("failed to post message. "
-             "Target handler %d still registered, but object gone.",
-             msg->target());
-
-        mHandlers.removeItemsAt(index);
         return -ENOENT;
     }
-
     looper->post(msg, delayUs);
-
     return OK;
 }
 
@@ -169,18 +159,23 @@
 
 status_t ALooperRoster::postAndAwaitResponse(
         const sp<AMessage> &msg, sp<AMessage> *response) {
+    sp<ALooper> looper = findLooper(msg->target());
+
+    if (looper == NULL) {
+        ALOGW("failed to post message. "
+                "Target handler %d still registered, but object gone.",
+                msg->target());
+        response->clear();
+        return -ENOENT;
+    }
+
     Mutex::Autolock autoLock(mLock);
 
     uint32_t replyID = mNextReplyID++;
 
     msg->setInt32("replyID", replyID);
 
-    status_t err = postMessage_l(msg, 0 /* delayUs */);
-
-    if (err != OK) {
-        response->clear();
-        return err;
-    }
+    looper->post(msg, 0 /* delayUs */);
 
     ssize_t index;
     while ((index = mReplies.indexOfKey(replyID)) < 0) {
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 8667a6b..7b18348 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -1019,11 +1019,19 @@
 }
 
 size_t LiveSession::getTrackCount() const {
-    return mPlaylist->getTrackCount();
+    if (mPlaylist == NULL) {
+        return 0;
+    } else {
+        return mPlaylist->getTrackCount();
+    }
 }
 
 sp<AMessage> LiveSession::getTrackInfo(size_t trackIndex) const {
-    return mPlaylist->getTrackInfo(trackIndex);
+    if (mPlaylist == NULL) {
+        return NULL;
+    } else {
+        return mPlaylist->getTrackInfo(trackIndex);
+    }
 }
 
 status_t LiveSession::selectTrack(size_t index, bool select) {
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 80cb2d0..82a4c39 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -490,11 +490,11 @@
 
     mStreamTypeMask = streamTypeMask;
 
-    mStartTimeUs = startTimeUs;
     mSegmentStartTimeUs = segmentStartTimeUs;
     mDiscontinuitySeq = startDiscontinuitySeq;
 
-    if (mStartTimeUs >= 0ll) {
+    if (startTimeUs >= 0) {
+        mStartTimeUs = startTimeUs;
         mSeqNumber = -1;
         mStartup = true;
         mPrepared = false;
@@ -754,6 +754,9 @@
             if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
                 // If this is a live session, start 3 segments from the end on connect
                 mSeqNumber = lastSeqNumberInPlaylist - 3;
+                if (mSeqNumber < firstSeqNumberInPlaylist) {
+                    mSeqNumber = firstSeqNumberInPlaylist;
+                }
             } else {
                 mSeqNumber = getSeqNumberForTime(mStartTimeUs);
                 mStartTimeUs -= getSegmentStartTimeUs(mSeqNumber);
diff --git a/media/libstagefright/include/SimpleSoftOMXComponent.h b/media/libstagefright/include/SimpleSoftOMXComponent.h
index f8c61eb..591b38e 100644
--- a/media/libstagefright/include/SimpleSoftOMXComponent.h
+++ b/media/libstagefright/include/SimpleSoftOMXComponent.h
@@ -58,6 +58,11 @@
         } mTransition;
     };
 
+    enum {
+        kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1,
+        kPrepareForAdaptivePlaybackIndex,
+    };
+
     void addPort(const OMX_PARAM_PORTDEFINITIONTYPE &def);
 
     virtual OMX_ERRORTYPE internalGetParameter(
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
index 7f200dd..ee553d9 100644
--- a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
@@ -55,6 +55,9 @@
     virtual OMX_ERRORTYPE getConfig(
             OMX_INDEXTYPE index, OMX_PTR params);
 
+    virtual OMX_ERRORTYPE getExtensionIndex(
+            const char *name, OMX_INDEXTYPE *index);
+
     void initPorts(OMX_U32 numInputBuffers,
             OMX_U32 inputBufferSize,
             OMX_U32 numOutputBuffers,
@@ -68,6 +71,8 @@
         kMaxPortIndex = 1,
     };
 
+    bool mIsAdaptive;
+    uint32_t mAdaptiveMaxWidth, mAdaptiveMaxHeight;
     uint32_t mWidth, mHeight;
     uint32_t mCropLeft, mCropTop, mCropWidth, mCropHeight;
 
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index 1c383f7..69b572e 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -22,6 +22,7 @@
 
 #include "include/SoftVideoDecoderOMXComponent.h"
 
+#include <media/hardware/HardwareAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -50,6 +51,9 @@
         OMX_PTR appData,
         OMX_COMPONENTTYPE **component)
         : SimpleSoftOMXComponent(name, callbacks, appData, component),
+        mIsAdaptive(false),
+        mAdaptiveMaxWidth(0),
+        mAdaptiveMaxHeight(0),
         mWidth(width),
         mHeight(height),
         mCropLeft(0),
@@ -127,8 +131,8 @@
     def->format.video.nSliceHeight = def->format.video.nFrameHeight;
 
     def = &editPortInfo(kOutputPortIndex)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
+    def->format.video.nFrameWidth = mIsAdaptive ? mAdaptiveMaxWidth : mWidth;
+    def->format.video.nFrameHeight = mIsAdaptive ? mAdaptiveMaxHeight : mHeight;
     def->format.video.nStride = def->format.video.nFrameWidth;
     def->format.video.nSliceHeight = def->format.video.nFrameHeight;
 
@@ -199,7 +203,10 @@
 
 OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalSetParameter(
         OMX_INDEXTYPE index, const OMX_PTR params) {
-    switch (index) {
+    // Include extension index OMX_INDEXEXTTYPE.
+    const int32_t indexFull = index;
+
+    switch (indexFull) {
         case OMX_IndexParamStandardComponentRole:
         {
             const OMX_PARAM_COMPONENTROLETYPE *roleParams =
@@ -230,6 +237,24 @@
             return OMX_ErrorNone;
         }
 
+        case kPrepareForAdaptivePlaybackIndex:
+        {
+            const PrepareForAdaptivePlaybackParams* adaptivePlaybackParams =
+                    (const PrepareForAdaptivePlaybackParams *)params;
+            mIsAdaptive = adaptivePlaybackParams->bEnable;
+            if (mIsAdaptive) {
+                mAdaptiveMaxWidth = adaptivePlaybackParams->nMaxFrameWidth;
+                mAdaptiveMaxHeight = adaptivePlaybackParams->nMaxFrameHeight;
+                mWidth = mAdaptiveMaxWidth;
+                mHeight = mAdaptiveMaxHeight;
+            } else {
+                mAdaptiveMaxWidth = 0;
+                mAdaptiveMaxHeight = 0;
+            }
+            updatePortDefinitions();
+            return OMX_ErrorNone;
+        }
+
         default:
             return SimpleSoftOMXComponent::internalSetParameter(index, params);
     }
@@ -259,6 +284,16 @@
     }
 }
 
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getExtensionIndex(
+        const char *name, OMX_INDEXTYPE *index) {
+    if (!strcmp(name, "OMX.google.android.index.prepareForAdaptivePlayback")) {
+        *(int32_t*)index = kPrepareForAdaptivePlaybackIndex;
+        return OMX_ErrorNone;
+    }
+
+    return SimpleSoftOMXComponent::getExtensionIndex(name, index);
+}
+
 void SoftVideoDecoderOMXComponent::onReset() {
     mOutputPortSettingsChange = NONE;
 }
diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
index 98b50dd..7eb6542 100644
--- a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
@@ -249,11 +249,15 @@
         mPackets.push_back(buffer);
     } else {
         // hexdump(buffer->data(), buffer->size());
+        if (buffer->size() < 2) {
+            return MALFORMED_PACKET;
+        }
 
-        CHECK_GE(buffer->size(), 2u);
         unsigned AU_headers_length = U16_AT(buffer->data());  // in bits
 
-        CHECK_GE(buffer->size(), 2 + (AU_headers_length + 7) / 8);
+        if (buffer->size() < 2 + (AU_headers_length + 7) / 8) {
+            return MALFORMED_PACKET;
+        }
 
         List<AUHeader> headers;
 
@@ -342,7 +346,9 @@
              it != headers.end(); ++it) {
             const AUHeader &header = *it;
 
-            CHECK_LE(offset + header.mSize, buffer->size());
+            if (buffer->size() < offset + header.mSize) {
+                return MALFORMED_PACKET;
+            }
 
             sp<ABuffer> accessUnit = new ABuffer(header.mSize);
             memcpy(accessUnit->data(), buffer->data() + offset, header.mSize);
@@ -353,7 +359,10 @@
             mPackets.push_back(accessUnit);
         }
 
-        CHECK_EQ(offset, buffer->size());
+        if (offset != buffer->size()) {
+            ALOGW("potentially malformed packet (offset %d, size %d)",
+                    offset, buffer->size());
+        }
     }
 
     queue->erase(queue->begin());
@@ -400,6 +409,7 @@
         const sp<ARTPSource> &source) {
     AssemblyStatus status = addPacket(source);
     if (status == MALFORMED_PACKET) {
+        ALOGI("access unit is damaged");
         mAccessUnitDamaged = true;
     }
     return status;
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 1f77b2f..1843722 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1941,9 +1941,8 @@
             TEE_SINK_NEW,   // copy input using a new pipe
             TEE_SINK_OLD,   // copy input using an existing pipe
         } kind;
-        NBAIO_Format format = Format_from_SR_C(inStream->common.get_sample_rate(&inStream->common),
-                audio_channel_count_from_in_mask(
-                        inStream->common.get_channels(&inStream->common)));
+        NBAIO_Format format = Format_from_SR_C(halconfig.sample_rate,
+                audio_channel_count_from_in_mask(halconfig.channel_mask), halconfig.format);
         if (!mTeeSinkInputEnabled) {
             kind = TEE_SINK_NO;
         } else if (!Format_isValid(format)) {
@@ -2700,24 +2699,26 @@
         // if 2 dumpsys are done within 1 second, and rotation didn't work, then discard 2nd
         int teeFd = open(teePath, O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW, S_IRUSR | S_IWUSR);
         if (teeFd >= 0) {
+            // FIXME use libsndfile
             char wavHeader[44];
             memcpy(wavHeader,
                 "RIFF\0\0\0\0WAVEfmt \20\0\0\0\1\0\2\0\104\254\0\0\0\0\0\0\4\0\20\0data\0\0\0\0",
                 sizeof(wavHeader));
             NBAIO_Format format = teeSource->format();
             unsigned channelCount = Format_channelCount(format);
-            ALOG_ASSERT(channelCount <= FCC_2);
             uint32_t sampleRate = Format_sampleRate(format);
+            size_t frameSize = Format_frameSize(format);
             wavHeader[22] = channelCount;       // number of channels
             wavHeader[24] = sampleRate;         // sample rate
             wavHeader[25] = sampleRate >> 8;
-            wavHeader[32] = channelCount * 2;   // block alignment
+            wavHeader[32] = frameSize;          // block alignment
+            wavHeader[33] = frameSize >> 8;
             write(teeFd, wavHeader, sizeof(wavHeader));
             size_t total = 0;
             bool firstRead = true;
+#define TEE_SINK_READ 1024                      // frames per I/O operation
+            void *buffer = malloc(TEE_SINK_READ * frameSize);
             for (;;) {
-#define TEE_SINK_READ 1024
-                short buffer[TEE_SINK_READ * FCC_2];
                 size_t count = TEE_SINK_READ;
                 ssize_t actual = teeSource->read(buffer, count,
                         AudioBufferProvider::kInvalidPTS);
@@ -2730,14 +2731,17 @@
                     break;
                 }
                 ALOG_ASSERT(actual <= (ssize_t)count);
-                write(teeFd, buffer, actual * channelCount * sizeof(short));
+                write(teeFd, buffer, actual * frameSize);
                 total += actual;
             }
+            free(buffer);
             lseek(teeFd, (off_t) 4, SEEK_SET);
-            uint32_t temp = 44 + total * channelCount * sizeof(short) - 8;
+            uint32_t temp = 44 + total * frameSize - 8;
+            // FIXME not big-endian safe
             write(teeFd, &temp, sizeof(temp));
             lseek(teeFd, (off_t) 40, SEEK_SET);
-            temp =  total * channelCount * sizeof(short);
+            temp =  total * frameSize;
+            // FIXME not big-endian safe
             write(teeFd, &temp, sizeof(temp));
             close(teeFd);
             if (fd >= 0) {
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 7ac2c0c..fd28ea1 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -1068,6 +1068,9 @@
 
     // compute everything we need...
     int countActiveTracks = 0;
+    // TODO: fix all16BitsStereoNoResample logic to
+    // either properly handle muted tracks (it should ignore them)
+    // or remove altogether as an obsolete optimization.
     bool all16BitsStereoNoResample = true;
     bool resampling = false;
     bool volumeRamp = false;
@@ -1152,8 +1155,15 @@
                 if (countActiveTracks == 1) {
                     const int i = 31 - __builtin_clz(state->enabledTracks);
                     track_t& t = state->tracks[i];
-                    state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
-                            t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
+                    if ((t.needs & NEEDS_MUTE) == 0) {
+                        // The check prevents a muted track from acquiring a process hook.
+                        //
+                        // This is dangerous if the track is MONO as that requires
+                        // special case handling due to implicit channel duplication.
+                        // Stereo or Multichannel should actually be fine here.
+                        state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+                                t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
+                    }
                 }
             }
         }
@@ -1188,6 +1198,7 @@
             if (countActiveTracks == 1) {
                 const int i = 31 - __builtin_clz(state->enabledTracks);
                 track_t& t = state->tracks[i];
+                // Muted single tracks handled by allMuted above.
                 state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
                         t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
             }
@@ -1745,9 +1756,10 @@
         if (in == NULL || (((uintptr_t)in) & 3)) {
             memset(out, 0, numFrames
                     * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
-            ALOGE_IF((((uintptr_t)in) & 3), "process stereo track: input buffer alignment pb: "
-                                              "buffer %p track %d, channels %d, needs %08x",
-                    in, i, t.channelCount, t.needs);
+            ALOGE_IF((((uintptr_t)in) & 3),
+                    "process__OneTrack16BitsStereoNoResampling: misaligned buffer"
+                    " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
+                    in, i, t.channelCount, t.needs, vrl, t.mVolume[0], t.mVolume[1]);
             return;
         }
         size_t outFrames = b.frameCount;
@@ -2173,6 +2185,10 @@
 
 /* Returns the proper process hook for mixing tracks. Currently works only for
  * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
+ *
+ * TODO: Due to the special mixing considerations of duplicating to
+ * a stereo output track, the input track cannot be MONO.  This should be
+ * prevented by the caller.
  */
 AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, uint32_t channelCount,
         audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 9e15293..2678cbf 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -420,7 +420,7 @@
         // if non-NULL, then duplicate write() to this non-blocking sink
         NBAIO_Sink* teeSink;
         if ((teeSink = current->mTeeSink) != NULL) {
-            (void) teeSink->write(mMixerBuffer, frameCount);
+            (void) teeSink->write(buffer, frameCount);
         }
         // FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink,
         //       but this code should be modified to handle both non-blocking and blocking sinks
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index f721d5c..942bff6 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3662,6 +3662,10 @@
     // remove all the tracks that need to be...
     removeTracks_l(*tracksToRemove);
 
+    if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0) {
+        mEffectBufferValid = true;
+    }
+
     // sink or mix buffer must be cleared if all tracks are connected to an
     // effect chain as in this case the mixer will not write to the sink or mix buffer
     // and track effects will accumulate into it
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index c5ab832..6cbab04 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -182,7 +182,7 @@
 
 #ifdef TEE_SINK
         if (mTeeSinkTrackEnabled) {
-            NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount);
+            NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount, mFormat);
             if (Format_isValid(pipeFormat)) {
                 Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
                 size_t numCounterOffers = 0;
diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/AudioPolicyEffects.cpp
index cc0e965..c45acd0 100644
--- a/services/audiopolicy/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/AudioPolicyEffects.cpp
@@ -98,8 +98,12 @@
         inputDesc = new EffectVector(audioSession);
         mInputs.add(input, inputDesc);
     } else {
+        // EffectVector already exists; just increase its ref count
         inputDesc = mInputs.valueAt(idx);
     }
+    inputDesc->mRefCount++;
+
+    ALOGV("addInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
 
     Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
     for (size_t i = 0; i < effects.size(); i++) {
@@ -133,10 +137,14 @@
         return status;
     }
     EffectVector *inputDesc = mInputs.valueAt(index);
-    setProcessorEnabled(inputDesc, false);
-    delete inputDesc;
-    mInputs.removeItemsAt(index);
-    ALOGV("releaseInputEffects(): all effects released");
+    inputDesc->mRefCount--;
+    ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
+    if (inputDesc->mRefCount == 0) {
+        setProcessorEnabled(inputDesc, false);
+        delete inputDesc;
+        mInputs.removeItemsAt(index);
+        ALOGV("releaseInputEffects(): all effects released");
+    }
     return status;
 }
 
@@ -223,8 +231,12 @@
         procDesc = new EffectVector(audioSession);
         mOutputSessions.add(audioSession, procDesc);
     } else {
+        // EffectVector already exists; just increase its ref count
         procDesc = mOutputSessions.valueAt(idx);
     }
+    procDesc->mRefCount++;
+
+    ALOGV("addOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
 
     Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
     for (size_t i = 0; i < effects.size(); i++) {
@@ -262,12 +274,16 @@
     }
 
     EffectVector *procDesc = mOutputSessions.valueAt(index);
-    setProcessorEnabled(procDesc, false);
-    procDesc->mEffects.clear();
-    delete procDesc;
-    mOutputSessions.removeItemsAt(index);
-    ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
-          audioSession);
+    procDesc->mRefCount--;
+    ALOGV("releaseOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
+    if (procDesc->mRefCount == 0) {
+        setProcessorEnabled(procDesc, false);
+        procDesc->mEffects.clear();
+        delete procDesc;
+        mOutputSessions.removeItemsAt(index);
+        ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
+              audioSession);
+    }
     return status;
 }
 
diff --git a/services/audiopolicy/AudioPolicyEffects.h b/services/audiopolicy/AudioPolicyEffects.h
index 351cb1a..dbe0d0e 100644
--- a/services/audiopolicy/AudioPolicyEffects.h
+++ b/services/audiopolicy/AudioPolicyEffects.h
@@ -131,9 +131,11 @@
     // class to store voctor of AudioEffects
     class EffectVector {
     public:
-        EffectVector(int session) : mSessionId(session) {}
+        EffectVector(int session) : mSessionId(session), mRefCount(0) {}
         /*virtual*/ ~EffectVector() {}
         const int mSessionId;
+        // AudioPolicyManager keeps mLock, no need for lock on reference count here
+        int mRefCount;
         Vector< sp<AudioEffect> >mEffects;
     };
 
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 14fdec5..084c853 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -271,7 +271,13 @@
                 return INVALID_OPERATION;
             }
 
-            ALOGV("setDeviceConnectionState() disconnecting device %x", device);
+            ALOGV("setDeviceConnectionState() disconnecting output device %x", device);
+
+            // Set Disconnect to HALs
+            AudioParameter param = AudioParameter(address);
+            param.addInt(String8(AUDIO_PARAMETER_DEVICE_DISCONNECT), device);
+            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+
             // remove device from available output devices
             mAvailableOutputDevices.remove(devDesc);
 
@@ -368,8 +374,17 @@
                 ALOGW("setDeviceConnectionState() device not connected: %d", device);
                 return INVALID_OPERATION;
             }
+
+            ALOGV("setDeviceConnectionState() disconnecting input device %x", device);
+
+            // Set Disconnect to HALs
+            AudioParameter param = AudioParameter(address);
+            param.addInt(String8(AUDIO_PARAMETER_DEVICE_DISCONNECT), device);
+            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+
             checkInputsForDevice(device, state, inputs, address);
             mAvailableInputDevices.remove(devDesc);
+
         } break;
 
         default:
@@ -1282,21 +1297,23 @@
     audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
 
     bool isSoundTrigger = false;
+    audio_source_t halInputSource = inputSource;
     if (inputSource == AUDIO_SOURCE_HOTWORD) {
         ssize_t index = mSoundTriggerSessions.indexOfKey(session);
         if (index >= 0) {
             input = mSoundTriggerSessions.valueFor(session);
             isSoundTrigger = true;
             ALOGV("SoundTrigger capture on session %d input %d", session, input);
+        } else {
+            halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
         }
     }
-
     status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
                                                    &input,
                                                    &config,
                                                    &device,
                                                    String8(""),
-                                                   inputSource,
+                                                   halInputSource,
                                                    flags);
 
     // only accept input with the exact requested set of parameters
@@ -1454,19 +1471,31 @@
         return;
     }
 
-    mpClientInterface->closeInput(input);
-    mInputs.removeItem(input);
-    nextAudioPortGeneration();
+    closeInput(input);
     mpClientInterface->onAudioPortListUpdate();
     ALOGV("releaseInput() exit");
 }
 
 void AudioPolicyManager::closeAllInputs() {
+    bool patchRemoved = false;
+
     for(size_t input_index = 0; input_index < mInputs.size(); input_index++) {
+        sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(input_index);
+        ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+        if (patch_index >= 0) {
+            sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patch_index);
+            status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+            mAudioPatches.removeItemsAt(patch_index);
+            patchRemoved = true;
+        }
         mpClientInterface->closeInput(mInputs.keyAt(input_index));
     }
     mInputs.clear();
     nextAudioPortGeneration();
+
+    if (patchRemoved) {
+        mpClientInterface->onAudioPatchListUpdate();
+    }
 }
 
 void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream,
@@ -3497,6 +3526,16 @@
         }
     }
 
+    nextAudioPortGeneration();
+
+    ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+    if (index >= 0) {
+        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
+        status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+        mAudioPatches.removeItemsAt(index);
+        mpClientInterface->onAudioPatchListUpdate();
+    }
+
     AudioParameter param;
     param.add(String8("closing"), String8("true"));
     mpClientInterface->setParameters(output, param.toString());
@@ -3504,7 +3543,30 @@
     mpClientInterface->closeOutput(output);
     mOutputs.removeItem(output);
     mPreviousOutputs = mOutputs;
+}
+
+void AudioPolicyManager::closeInput(audio_io_handle_t input)
+{
+    ALOGV("closeInput(%d)", input);
+
+    sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
+    if (inputDesc == NULL) {
+        ALOGW("closeInput() unknown input %d", input);
+        return;
+    }
+
     nextAudioPortGeneration();
+
+    ssize_t index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+    if (index >= 0) {
+        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
+        status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+        mAudioPatches.removeItemsAt(index);
+        mpClientInterface->onAudioPatchListUpdate();
+    }
+
+    mpClientInterface->closeInput(input);
+    mInputs.removeItem(input);
 }
 
 SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(audio_devices_t device,
@@ -3874,7 +3936,7 @@
             if (((mAvailableInputDevices.types() &
                     AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
                     (((txDevice & availablePrimaryInputDevices() & ~AUDIO_DEVICE_BIT_IN) != 0) &&
-                         (hwOutputDesc->mAudioPort->mModule->mHalVersion <
+                         (hwOutputDesc->getAudioPort()->mModule->mHalVersion <
                              AUDIO_DEVICE_API_VERSION_3_0))) {
                 availableOutputDeviceTypes = availablePrimaryOutputDevices();
             }
@@ -4257,6 +4319,20 @@
                 mpClientInterface->onAudioPatchListUpdate();
             }
         }
+
+        // inform all input as well
+        for (size_t i = 0; i < mInputs.size(); i++) {
+            const sp<AudioInputDescriptor>  inputDescriptor = mInputs.valueAt(i);
+            if (!isVirtualInputDevice(inputDescriptor->mDevice)) {
+                AudioParameter inputCmd = AudioParameter();
+                ALOGV("%s: inform input %d of device:%d", __func__,
+                      inputDescriptor->mIoHandle, device);
+                inputCmd.addInt(String8(AudioParameter::keyRouting),device);
+                mpClientInterface->setParameters(inputDescriptor->mIoHandle,
+                                                 inputCmd.toString(),
+                                                 delayMs);
+            }
+        }
     }
 
     // update stream volumes according to new device
@@ -4558,8 +4634,15 @@
         //  - one A2DP device + another device: happens with duplicated output. In this case
         // retain the device on the A2DP output as the other must not correspond to an active
         // selection if not the speaker.
+        //  - HDMI-CEC system audio mode only output: give priority to available item in order.
         if (device & AUDIO_DEVICE_OUT_SPEAKER) {
             device = AUDIO_DEVICE_OUT_SPEAKER;
+        } else if (device & AUDIO_DEVICE_OUT_HDMI_ARC) {
+            device = AUDIO_DEVICE_OUT_HDMI_ARC;
+        } else if (device & AUDIO_DEVICE_OUT_AUX_LINE) {
+            device = AUDIO_DEVICE_OUT_AUX_LINE;
+        } else if (device & AUDIO_DEVICE_OUT_SPDIF) {
+            device = AUDIO_DEVICE_OUT_SPDIF;
         } else {
             device = (audio_devices_t)(device & AUDIO_DEVICE_OUT_ALL_A2DP);
         }
@@ -5070,7 +5153,6 @@
         mStrategyMutedByDevice[i] = false;
     }
     if (profile != NULL) {
-        mAudioPort = profile;
         mFlags = profile->mFlags;
         mSamplingRate = profile->pickSamplingRate();
         mFormat = profile->pickFormat();
@@ -5253,7 +5335,6 @@
       mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile), mIsSoundTrigger(false)
 {
     if (profile != NULL) {
-        mAudioPort = profile;
         mSamplingRate = profile->pickSamplingRate();
         mFormat = profile->pickFormat();
         mChannelMask = profile->pickChannelMask();
@@ -6273,33 +6354,34 @@
     localBackupConfig.config_mask = config->config_mask;
     toAudioPortConfig(&localBackupConfig);
 
-    if (mAudioPort == 0) {
+    sp<AudioPort> audioport = getAudioPort();
+    if (audioport == 0) {
         status = NO_INIT;
         goto exit;
     }
     if (config->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
-        status = mAudioPort->checkExactSamplingRate(config->sample_rate);
+        status = audioport->checkExactSamplingRate(config->sample_rate);
         if (status != NO_ERROR) {
             goto exit;
         }
         mSamplingRate = config->sample_rate;
     }
     if (config->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
-        status = mAudioPort->checkExactChannelMask(config->channel_mask);
+        status = audioport->checkExactChannelMask(config->channel_mask);
         if (status != NO_ERROR) {
             goto exit;
         }
         mChannelMask = config->channel_mask;
     }
     if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
-        status = mAudioPort->checkFormat(config->format);
+        status = audioport->checkFormat(config->format);
         if (status != NO_ERROR) {
             goto exit;
         }
         mFormat = config->format;
     }
     if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
-        status = mAudioPort->checkGain(&config->gain, config->gain.index);
+        status = audioport->checkGain(&config->gain, config->gain.index);
         if (status != NO_ERROR) {
             goto exit;
         }
@@ -6486,7 +6568,6 @@
                              NULL),
                      mDeviceType(type), mAddress(""), mId(0)
 {
-    mAudioPort = this;
     if (mGains.size() > 0) {
         mGains[0]->getDefaultConfig(&mGain);
     }
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index e3e3172..57e015e 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -297,7 +297,7 @@
                                           struct audio_port_config *backupConfig = NULL);
             virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                                    const struct audio_port_config *srcConfig = NULL) const = 0;
-            sp<AudioPort> mAudioPort;
+            virtual sp<AudioPort> getAudioPort() const = 0;
             uint32_t mSamplingRate;
             audio_format_t mFormat;
             audio_channel_mask_t mChannelMask;
@@ -330,6 +330,7 @@
             bool equals(const sp<DeviceDescriptor>& other) const;
             virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                                    const struct audio_port_config *srcConfig = NULL) const;
+            virtual sp<AudioPort> getAudioPort() const { return (AudioPort*) this; }
 
             virtual void toAudioPort(struct audio_port *port) const;
 
@@ -462,6 +463,7 @@
 
             virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                                    const struct audio_port_config *srcConfig = NULL) const;
+            virtual sp<AudioPort> getAudioPort() const { return mProfile; }
             void toAudioPort(struct audio_port *port) const;
 
             audio_port_handle_t mId;
@@ -506,6 +508,7 @@
 
             virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                                    const struct audio_port_config *srcConfig = NULL) const;
+            virtual sp<AudioPort> getAudioPort() const { return mProfile; }
             void toAudioPort(struct audio_port *port) const;
         };
 
@@ -646,6 +649,9 @@
         // close an output and its companion duplicating output.
         void closeOutput(audio_io_handle_t output);
 
+        // close an input.
+        void closeInput(audio_io_handle_t input);
+
         // checks and if necessary changes outputs used for all strategies.
         // must be called every time a condition that affects the output choice for a given strategy
         // changes: connected device, phone state, force use...
diff --git a/services/audiopolicy/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp
index 7f14960..50bb8c7 100644
--- a/services/audiopolicy/AudioPolicyService.cpp
+++ b/services/audiopolicy/AudioPolicyService.cpp
@@ -765,7 +765,16 @@
         sp<AudioCommand> command2 = mAudioCommands[i];
         // commands are sorted by increasing time stamp: no need to scan the rest of mAudioCommands
         if (command2->mTime <= command->mTime) break;
-        if (command2->mCommand != command->mCommand) continue;
+
+        // create audio patch or release audio patch commands are equivalent
+        // with regard to filtering
+        if ((command->mCommand == CREATE_AUDIO_PATCH) ||
+                (command->mCommand == RELEASE_AUDIO_PATCH)) {
+            if ((command2->mCommand != CREATE_AUDIO_PATCH) &&
+                    (command2->mCommand != RELEASE_AUDIO_PATCH)) {
+                continue;
+            }
+        } else if (command2->mCommand != command->mCommand) continue;
 
         switch (command->mCommand) {
         case SET_PARAMETERS: {
@@ -817,6 +826,31 @@
             // command status as the command is now delayed
             delayMs = 1;
         } break;
+
+        case CREATE_AUDIO_PATCH:
+        case RELEASE_AUDIO_PATCH: {
+            audio_patch_handle_t handle;
+            if (command->mCommand == CREATE_AUDIO_PATCH) {
+                handle = ((CreateAudioPatchData *)command->mParam.get())->mHandle;
+            } else {
+                handle = ((ReleaseAudioPatchData *)command->mParam.get())->mHandle;
+            }
+            audio_patch_handle_t handle2;
+            if (command2->mCommand == CREATE_AUDIO_PATCH) {
+                handle2 = ((CreateAudioPatchData *)command2->mParam.get())->mHandle;
+            } else {
+                handle2 = ((ReleaseAudioPatchData *)command2->mParam.get())->mHandle;
+            }
+            if (handle != handle2) break;
+            ALOGV("Filtering out %s audio patch command for handle %d",
+                  (command->mCommand == CREATE_AUDIO_PATCH) ? "create" : "release", handle);
+            removedCommands.add(command2);
+            command->mTime = command2->mTime;
+            // force delayMs to non 0 so that code below does not request to wait for
+            // command status as the command is now delayed
+            delayMs = 1;
+        } break;
+
         case START_TONE:
         case STOP_TONE:
         default:
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 2f485b9..9d6ab23 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -47,6 +47,7 @@
     device3/Camera3InputStream.cpp \
     device3/Camera3OutputStream.cpp \
     device3/Camera3ZslStream.cpp \
+    device3/Camera3DummyStream.cpp \
     device3/StatusTracker.cpp \
     gui/RingBufferConsumer.cpp \
     utils/CameraTraces.cpp \
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 7766b90..fd5a426 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -487,12 +487,12 @@
         }
         if (client == NULL) {
             needsNewClient = true;
-            ret = connectHelperLocked(/*cameraClient*/NULL, // Empty binder callbacks
+            ret = connectHelperLocked(/*out*/client,
+                                      /*cameraClient*/NULL, // Empty binder callbacks
                                       cameraId,
                                       internalPackageName,
                                       uid,
-                                      pid,
-                                      client);
+                                      pid);
 
             if (ret != OK) {
                 // Error already logged by callee
@@ -659,14 +659,17 @@
     return true;
 }
 
-status_t CameraService::connectHelperLocked(const sp<ICameraClient>& cameraClient,
-                                      int cameraId,
-                                      const String16& clientPackageName,
-                                      int clientUid,
-                                      int callingPid,
-                                      /*out*/
-                                      sp<Client>& client,
-                                      int halVersion) {
+status_t CameraService::connectHelperLocked(
+        /*out*/
+        sp<Client>& client,
+        /*in*/
+        const sp<ICameraClient>& cameraClient,
+        int cameraId,
+        const String16& clientPackageName,
+        int clientUid,
+        int callingPid,
+        int halVersion,
+        bool legacyMode) {
 
     int facing = -1;
     int deviceVersion = getDeviceVersion(cameraId, &facing);
@@ -678,7 +681,7 @@
           case CAMERA_DEVICE_API_VERSION_1_0:
             client = new CameraClient(this, cameraClient,
                     clientPackageName, cameraId,
-                    facing, callingPid, clientUid, getpid());
+                    facing, callingPid, clientUid, getpid(), legacyMode);
             break;
           case CAMERA_DEVICE_API_VERSION_2_0:
           case CAMERA_DEVICE_API_VERSION_2_1:
@@ -687,7 +690,7 @@
           case CAMERA_DEVICE_API_VERSION_3_2:
             client = new Camera2Client(this, cameraClient,
                     clientPackageName, cameraId,
-                    facing, callingPid, clientUid, getpid());
+                    facing, callingPid, clientUid, getpid(), legacyMode);
             break;
           case -1:
             ALOGE("Invalid camera id %d", cameraId);
@@ -704,7 +707,7 @@
             // Only support higher HAL version device opened as HAL1.0 device.
             client = new CameraClient(this, cameraClient,
                     clientPackageName, cameraId,
-                    facing, callingPid, clientUid, getpid());
+                    facing, callingPid, clientUid, getpid(), legacyMode);
         } else {
             // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
             ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
@@ -760,12 +763,12 @@
             return OK;
         }
 
-        status = connectHelperLocked(cameraClient,
+        status = connectHelperLocked(/*out*/client,
+                                     cameraClient,
                                      cameraId,
                                      clientPackageName,
                                      clientUid,
-                                     callingPid,
-                                     client);
+                                     callingPid);
         if (status != OK) {
             return status;
         }
@@ -823,13 +826,14 @@
             return OK;
         }
 
-        status = connectHelperLocked(cameraClient,
+        status = connectHelperLocked(/*out*/client,
+                                     cameraClient,
                                      cameraId,
                                      clientPackageName,
                                      clientUid,
                                      callingPid,
-                                     client,
-                                     halVersion);
+                                     halVersion,
+                                     /*legacyMode*/true);
         if (status != OK) {
             return status;
         }
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index cb98c96..a7328cf 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -452,14 +452,17 @@
      *
      * Returns OK on success, or a negative error code.
      */
-    status_t            connectHelperLocked(const sp<ICameraClient>& cameraClient,
-                                      int cameraId,
-                                      const String16& clientPackageName,
-                                      int clientUid,
-                                      int callingPid,
-                                      /*out*/
-                                      sp<Client>& client,
-                                      int halVersion = CAMERA_HAL_API_VERSION_UNSPECIFIED);
+    status_t            connectHelperLocked(
+            /*out*/
+            sp<Client>& client,
+            /*in*/
+            const sp<ICameraClient>& cameraClient,
+            int cameraId,
+            const String16& clientPackageName,
+            int clientUid,
+            int callingPid,
+            int halVersion = CAMERA_HAL_API_VERSION_UNSPECIFIED,
+            bool legacyMode = false);
 };
 
 } // namespace android
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 5eb5181..bc40971 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -53,7 +53,8 @@
         int cameraFacing,
         int clientPid,
         uid_t clientUid,
-        int servicePid):
+        int servicePid,
+        bool legacyMode):
         Camera2ClientBase(cameraService, cameraClient, clientPackageName,
                 cameraId, cameraFacing, clientPid, clientUid, servicePid),
         mParameters(cameraId, cameraFacing)
@@ -62,6 +63,8 @@
 
     SharedParameters::Lock l(mParameters);
     l.mParameters.state = Parameters::DISCONNECTED;
+
+    mLegacyMode = legacyMode;
 }
 
 status_t Camera2Client::initialize(camera_module_t *module)
@@ -1449,6 +1452,13 @@
         return OK;
     }
 
+    // the camera2 api legacy mode can unconditionally disable the shutter sound
+    if (mLegacyMode) {
+        ALOGV("%s: Disable shutter sound in legacy mode", __FUNCTION__);
+        l.mParameters.playShutterSound = false;
+        return OK;
+    }
+
     // Disabling shutter sound may not be allowed. In that case only
     // allow the mediaserver process to disable the sound.
     char value[PROPERTY_VALUE_MAX];
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 5ce757a..f5c3a30 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -89,7 +89,8 @@
             int cameraFacing,
             int clientPid,
             uid_t clientUid,
-            int servicePid);
+            int servicePid,
+            bool legacyMode);
 
     virtual ~Camera2Client();
 
@@ -203,6 +204,7 @@
     bool mAfInMotion;
 
     /** Utility members */
+    bool mLegacyMode;
 
     // Wait until the camera device has received the latest control settings
     status_t syncWithDevice();
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index fb6b678..33bdaa3 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -38,7 +38,7 @@
         const String16& clientPackageName,
         int cameraId, int cameraFacing,
         int clientPid, int clientUid,
-        int servicePid):
+        int servicePid, bool legacyMode):
         Client(cameraService, cameraClient, clientPackageName,
                 cameraId, cameraFacing, clientPid, clientUid, servicePid)
 {
@@ -54,6 +54,7 @@
     // Callback is disabled by default
     mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
     mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT);
+    mLegacyMode = legacyMode;
     mPlayShutterSound = true;
     LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
 }
@@ -576,6 +577,13 @@
         return OK;
     }
 
+    // the camera2 api legacy mode can unconditionally disable the shutter sound
+    if (mLegacyMode) {
+        ALOGV("%s: Disable shutter sound in legacy mode", __FUNCTION__);
+        mPlayShutterSound = false;
+        return OK;
+    }
+
     // Disabling shutter sound may not be allowed. In that case only
     // allow the mediaserver process to disable the sound.
     char value[PROPERTY_VALUE_MAX];
@@ -930,7 +938,20 @@
     }
     previewBuffer = mPreviewBuffer;
 
-    memcpy(previewBuffer->base(), (uint8_t *)heap->base() + offset, size);
+    void* previewBufferBase = previewBuffer->base();
+    void* heapBase = heap->base();
+
+    if (heapBase == MAP_FAILED) {
+        ALOGE("%s: Failed to mmap heap for preview frame.", __FUNCTION__);
+        mLock.unlock();
+        return;
+    } else if (previewBufferBase == MAP_FAILED) {
+        ALOGE("%s: Failed to mmap preview buffer for preview frame.", __FUNCTION__);
+        mLock.unlock();
+        return;
+    }
+
+    memcpy(previewBufferBase, (uint8_t *) heapBase + offset, size);
 
     sp<MemoryBase> frame = new MemoryBase(previewBuffer, 0, size);
     if (frame == 0) {
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 4b89564..6779f5e 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -64,7 +64,8 @@
             int cameraFacing,
             int clientPid,
             int clientUid,
-            int servicePid);
+            int servicePid,
+            bool legacyMode = false);
     ~CameraClient();
 
     status_t initialize(camera_module_t *module);
@@ -129,6 +130,7 @@
     int                             mPreviewCallbackFlag;
     int                             mOrientation;     // Current display orientation
     bool                            mPlayShutterSound;
+    bool                            mLegacyMode; // camera2 api legacy mode?
 
     // Ensures atomicity among the public methods
     mutable Mutex                   mLock;
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index cb9aca6..9849f4d 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -445,11 +445,18 @@
     if (mNewAEState) {
         if (!mAeInPrecapture) {
             // Waiting to see PRECAPTURE state
-            if (mAETriggerId == mTriggerId &&
-                    mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
-                ALOGV("%s: Got precapture start", __FUNCTION__);
-                mAeInPrecapture = true;
-                mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
+            if (mAETriggerId == mTriggerId) {
+                if (mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+                    ALOGV("%s: Got precapture start", __FUNCTION__);
+                    mAeInPrecapture = true;
+                    mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
+                } else if (mAEState == ANDROID_CONTROL_AE_STATE_CONVERGED ||
+                        mAEState == ANDROID_CONTROL_AE_STATE_FLASH_REQUIRED) {
+                    // It is legal to transition to CONVERGED or FLASH_REQUIRED
+                    // directly after a trigger.
+                    ALOGV("%s: AE is already in good state, start capture", __FUNCTION__);
+                    return STANDARD_CAPTURE;
+                }
             }
         } else {
             // Waiting to see PRECAPTURE state end
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index b448e06..e7f9a78 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -644,8 +644,17 @@
         focusMode = Parameters::FOCUS_MODE_AUTO;
         params.set(CameraParameters::KEY_FOCUS_MODE,
                 CameraParameters::FOCUS_MODE_AUTO);
-        String8 supportedFocusModes(CameraParameters::FOCUS_MODE_INFINITY);
-        bool addComma = true;
+        String8 supportedFocusModes;
+        bool addComma = false;
+        camera_metadata_ro_entry_t focusDistanceCalibration =
+            staticInfo(ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION, 0, 0, false);
+
+        if (focusDistanceCalibration.count &&
+                focusDistanceCalibration.data.u8[0] !=
+                ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED) {
+            supportedFocusModes += CameraParameters::FOCUS_MODE_INFINITY;
+            addComma = true;
+        }
 
         for (size_t i=0; i < availableAfModes.count; i++) {
             if (addComma) supportedFocusModes += ",";
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index 37de610..2d31275 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -51,7 +51,8 @@
         mZslStreamId(NO_STREAM),
         mFrameListHead(0),
         mZslQueueHead(0),
-        mZslQueueTail(0) {
+        mZslQueueTail(0),
+        mHasFocuser(false) {
     // Initialize buffer queue and frame list based on pipeline max depth.
     size_t pipelineMaxDepth = kDefaultMaxPipelineDepth;
     if (client != 0) {
@@ -67,13 +68,22 @@
                         " use default pipeline max depth %zu", __FUNCTION__,
                         kDefaultMaxPipelineDepth);
             }
+
+            entry = device->info().find(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
+            if (entry.count > 0 && entry.data.f[0] != 0.) {
+                mHasFocuser = true;
+            }
         }
     }
 
     ALOGV("%s: Initialize buffer queue and frame list depth based on max pipeline depth (%d)",
           __FUNCTION__, pipelineMaxDepth);
-    mBufferQueueDepth = pipelineMaxDepth + 1;
-    mFrameListDepth = pipelineMaxDepth + 1;
+    // Need to keep buffer queue longer than metadata queue because sometimes buffer arrives
+    // earlier than metadata which causes the buffer corresponding to oldest metadata being
+    // removed.
+    mFrameListDepth = pipelineMaxDepth;
+    mBufferQueueDepth = mFrameListDepth + 1;
+
 
     mZslQueue.insertAt(0, mBufferQueueDepth);
     mFrameList.insertAt(0, mFrameListDepth);
@@ -489,20 +499,23 @@
                     continue;
                 }
 
-                // Make sure the candidate frame has good focus.
-                entry = frame.find(ANDROID_CONTROL_AF_STATE);
-                if (entry.count == 0) {
-                    ALOGW("%s: ZSL queue frame has no AF state field!",
-                            __FUNCTION__);
-                    continue;
-                }
-                uint8_t afState = entry.data.u8[0];
-                if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
-                        afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
-                        afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
-                    ALOGW("%s: ZSL queue frame AF state is %d is not good for capture, skip it",
-                            __FUNCTION__, afState);
-                    continue;
+                // Check AF state if device has focuser
+                if (mHasFocuser) {
+                    // Make sure the candidate frame has good focus.
+                    entry = frame.find(ANDROID_CONTROL_AF_STATE);
+                    if (entry.count == 0) {
+                        ALOGW("%s: ZSL queue frame has no AF state field!",
+                                __FUNCTION__);
+                        continue;
+                    }
+                    uint8_t afState = entry.data.u8[0];
+                    if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
+                            afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
+                            afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
+                        ALOGW("%s: ZSL queue frame AF state is %d is not good for capture, skip it",
+                                __FUNCTION__, afState);
+                        continue;
+                    }
                 }
 
                 minTimestamp = frameTimestamp;
@@ -545,13 +558,15 @@
 }
 
 void ZslProcessor3::onBufferReleased(const BufferInfo& bufferInfo) {
-    Mutex::Autolock l(mInputMutex);
 
     // ignore output buffers
     if (bufferInfo.mOutput) {
         return;
     }
 
+    // Lock mutex only once we know this is a returned input buffer, to avoid
+    // a potential deadlock
+    Mutex::Autolock l(mInputMutex);
     // TODO: Verify that the buffer is in our queue by looking at timestamp
     // theoretically unnecessary unless we change the following assumptions:
     // -- only 1 buffer reprocessed at a time (which is the case now)
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
index dfb1457..daa352b 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
@@ -121,6 +121,8 @@
 
     CameraMetadata mLatestCapturedRequest;
 
+    bool mHasFocuser;
+
     virtual bool threadLoop();
 
     status_t clearZslQueueLocked();
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 86f82a3..80c797a 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -254,9 +254,17 @@
 }
 
 status_t CameraDeviceClient::endConfigure() {
-    // TODO: Implement this.
-    ALOGE("%s: Not implemented yet.", __FUNCTION__);
-    return OK;
+    ALOGV("%s: ending configure (%zu streams)",
+            __FUNCTION__, mStreamMap.size());
+
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    return mDevice->configureStreams();
 }
 
 status_t CameraDeviceClient::deleteStream(int streamId) {
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 9e124b0..d26e20c 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -141,6 +141,18 @@
     virtual status_t deleteReprocessStream(int id) = 0;
 
     /**
+     * Take the currently-defined set of streams and configure the HAL to use
+     * them. This is a long-running operation (may be several hundred ms).
+     *
+     * The device must be idle (see waitUntilDrained) before calling this.
+     *
+     * Returns OK on success; otherwise on error:
+     * - BAD_VALUE if the set of streams was invalid (e.g. fmts or sizes)
+     * - INVALID_OPERATION if the device was in the wrong state
+     */
+    virtual status_t configureStreams() = 0;
+
+    /**
      * Create a metadata buffer with fields that the HAL device believes are
      * best for the given use case
      */
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index d473a76..8caadd6 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -415,6 +415,19 @@
     return OK;
 }
 
+status_t Camera2Device::configureStreams() {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+
+    /**
+     * HAL2 devices do not need to configure streams;
+     * streams are created on the fly.
+     */
+    ALOGW("%s: No-op for HAL2 devices", __FUNCTION__);
+
+    return OK;
+}
+
 
 status_t Camera2Device::createDefaultRequest(int templateId,
         CameraMetadata *request) {
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index d0ca46e..2a3f1d9 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -64,6 +64,8 @@
     virtual status_t setStreamTransform(int id, int transform);
     virtual status_t deleteStream(int id);
     virtual status_t deleteReprocessStream(int id);
+    // No-op on HAL2 devices
+    virtual status_t configureStreams();
     virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
     virtual status_t waitUntilDrained();
     virtual status_t setNotifyCallback(NotificationListener *listener);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index ed350c1..6f78db5 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -48,6 +48,7 @@
 #include "device3/Camera3OutputStream.h"
 #include "device3/Camera3InputStream.h"
 #include "device3/Camera3ZslStream.h"
+#include "device3/Camera3DummyStream.h"
 #include "CameraService.h"
 
 using namespace android::camera3;
@@ -181,6 +182,7 @@
     mHal3Device = device;
     mStatus = STATUS_UNCONFIGURED;
     mNextStreamId = 0;
+    mDummyStreamId = NO_STREAM;
     mNeedConfig = true;
     mPauseStateNotify = false;
 
@@ -1000,6 +1002,15 @@
     return INVALID_OPERATION;
 }
 
+status_t Camera3Device::configureStreams() {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    return configureStreamsLocked();
+}
 
 status_t Camera3Device::createDefaultRequest(int templateId,
         CameraMetadata *request) {
@@ -1136,6 +1147,7 @@
         ALOGW("%s: Replacing old callback listener", __FUNCTION__);
     }
     mListener = listener;
+    mRequestThread->setNotifyCallback(listener);
 
     return OK;
 }
@@ -1259,9 +1271,15 @@
     ALOGV("%s: Camera %d: Flushing all requests", __FUNCTION__, mId);
     Mutex::Autolock il(mInterfaceLock);
 
+    NotificationListener* listener;
+    {
+        Mutex::Autolock l(mOutputLock);
+        listener = mListener;
+    }
+
     {
         Mutex::Autolock l(mLock);
-        mRequestThread->clear(/*out*/frameNumber);
+        mRequestThread->clear(listener, /*out*/frameNumber);
     }
 
     status_t res;
@@ -1402,6 +1420,15 @@
         return OK;
     }
 
+    // Workaround for device HALv3.2 or older spec bug - zero streams requires
+    // adding a dummy stream instead.
+    // TODO: Bug: 17321404 for fixing the HAL spec and removing this workaround.
+    if (mOutputStreams.size() == 0) {
+        addDummyStreamLocked();
+    } else {
+        tryRemoveDummyStreamLocked();
+    }
+
     // Start configuring the streams
     ALOGV("%s: Camera %d: Starting stream configuration", __FUNCTION__, mId);
 
@@ -1449,7 +1476,42 @@
     res = mHal3Device->ops->configure_streams(mHal3Device, &config);
     ATRACE_END();
 
-    if (res != OK) {
+    if (res == BAD_VALUE) {
+        // HAL rejected this set of streams as unsupported, clean up config
+        // attempt and return to unconfigured state
+        if (mInputStream != NULL && mInputStream->isConfiguring()) {
+            res = mInputStream->cancelConfiguration();
+            if (res != OK) {
+                SET_ERR_L("Can't cancel configuring input stream %d: %s (%d)",
+                        mInputStream->getId(), strerror(-res), res);
+                return res;
+            }
+        }
+
+        for (size_t i = 0; i < mOutputStreams.size(); i++) {
+            sp<Camera3OutputStreamInterface> outputStream =
+                    mOutputStreams.editValueAt(i);
+            if (outputStream->isConfiguring()) {
+                res = outputStream->cancelConfiguration();
+                if (res != OK) {
+                    SET_ERR_L(
+                        "Can't cancel configuring output stream %d: %s (%d)",
+                        outputStream->getId(), strerror(-res), res);
+                    return res;
+                }
+            }
+        }
+
+        // Return state to that at start of call, so that future configures
+        // properly clean things up
+        mStatus = STATUS_UNCONFIGURED;
+        mNeedConfig = true;
+
+        ALOGV("%s: Camera %d: Stream configuration failed", __FUNCTION__, mId);
+        return BAD_VALUE;
+    } else if (res != OK) {
+        // Some other kind of error from configure_streams - this is not
+        // expected
         SET_ERR_L("Unable to configure streams with HAL: %s (%d)",
                 strerror(-res), res);
         return res;
@@ -1489,7 +1551,7 @@
 
     mNeedConfig = false;
 
-    if (config.num_streams > 0) {
+    if (mDummyStreamId == NO_STREAM) {
         mStatus = STATUS_CONFIGURED;
     } else {
         mStatus = STATUS_UNCONFIGURED;
@@ -1503,6 +1565,69 @@
     return OK;
 }
 
+status_t Camera3Device::addDummyStreamLocked() {
+    ATRACE_CALL();
+    status_t res;
+
+    if (mDummyStreamId != NO_STREAM) {
+        // Should never be adding a second dummy stream when one is already
+        // active
+        SET_ERR_L("%s: Camera %d: A dummy stream already exists!",
+                __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    ALOGV("%s: Camera %d: Adding a dummy stream", __FUNCTION__, mId);
+
+    sp<Camera3OutputStreamInterface> dummyStream =
+            new Camera3DummyStream(mNextStreamId);
+
+    res = mOutputStreams.add(mNextStreamId, dummyStream);
+    if (res < 0) {
+        SET_ERR_L("Can't add dummy stream to set: %s (%d)", strerror(-res), res);
+        return res;
+    }
+
+    mDummyStreamId = mNextStreamId;
+    mNextStreamId++;
+
+    return OK;
+}
+
+status_t Camera3Device::tryRemoveDummyStreamLocked() {
+    ATRACE_CALL();
+    status_t res;
+
+    if (mDummyStreamId == NO_STREAM) return OK;
+    if (mOutputStreams.size() == 1) return OK;
+
+    ALOGV("%s: Camera %d: Removing the dummy stream", __FUNCTION__, mId);
+
+    // Ok, have a dummy stream and there's at least one other output stream,
+    // so remove the dummy
+
+    sp<Camera3StreamInterface> deletedStream;
+    ssize_t outputStreamIdx = mOutputStreams.indexOfKey(mDummyStreamId);
+    if (outputStreamIdx == NAME_NOT_FOUND) {
+        SET_ERR_L("Dummy stream %d does not appear to exist", mDummyStreamId);
+        return INVALID_OPERATION;
+    }
+
+    deletedStream = mOutputStreams.editValueAt(outputStreamIdx);
+    mOutputStreams.removeItemsAt(outputStreamIdx);
+
+    // Free up the stream endpoint so that it can be used by some other stream
+    res = deletedStream->disconnect();
+    if (res != OK) {
+        SET_ERR_L("Can't disconnect deleted dummy stream %d", mDummyStreamId);
+        // fall through since we want to still list the stream as deleted.
+    }
+    mDeletedStreams.add(deletedStream);
+    mDummyStreamId = NO_STREAM;
+
+    return res;
+}
+
 void Camera3Device::setErrorState(const char *fmt, ...) {
     Mutex::Autolock l(mLock);
     va_list args;
@@ -1535,14 +1660,20 @@
     // But only do error state transition steps for the first error
     if (mStatus == STATUS_ERROR || mStatus == STATUS_UNINITIALIZED) return;
 
-    // Save stack trace. View by dumping it later.
-    CameraTraces::saveTrace();
-    // TODO: consider adding errorCause and client pid/procname
-
     mErrorCause = errorCause;
 
     mRequestThread->setPaused(true);
     mStatus = STATUS_ERROR;
+
+    // Notify upstream about a device error
+    if (mListener != NULL) {
+        mListener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
+                CaptureResultExtras());
+    }
+
+    // Save stack trace. View by dumping it later.
+    CameraTraces::saveTrace();
+    // TODO: consider adding errorCause and client pid/procname
 }
 
 /**
@@ -2013,84 +2144,11 @@
 
     switch (msg->type) {
         case CAMERA3_MSG_ERROR: {
-            int streamId = 0;
-            if (msg->message.error.error_stream != NULL) {
-                Camera3Stream *stream =
-                        Camera3Stream::cast(
-                                  msg->message.error.error_stream);
-                streamId = stream->getId();
-            }
-            ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
-                    mId, __FUNCTION__, msg->message.error.frame_number,
-                    streamId, msg->message.error.error_code);
-
-            CaptureResultExtras resultExtras;
-            // Set request error status for the request in the in-flight tracking
-            {
-                Mutex::Autolock l(mInFlightLock);
-                ssize_t idx = mInFlightMap.indexOfKey(msg->message.error.frame_number);
-                if (idx >= 0) {
-                    InFlightRequest &r = mInFlightMap.editValueAt(idx);
-                    r.requestStatus = msg->message.error.error_code;
-                    resultExtras = r.resultExtras;
-                } else {
-                    resultExtras.frameNumber = msg->message.error.frame_number;
-                    ALOGE("Camera %d: %s: cannot find in-flight request on frame %" PRId64
-                          " error", mId, __FUNCTION__, resultExtras.frameNumber);
-                }
-            }
-
-            if (listener != NULL) {
-                if (msg->message.error.error_code == CAMERA3_MSG_ERROR_DEVICE) {
-                    listener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
-                                          resultExtras);
-                }
-            } else {
-                ALOGE("Camera %d: %s: no listener available", mId, __FUNCTION__);
-            }
+            notifyError(msg->message.error, listener);
             break;
         }
         case CAMERA3_MSG_SHUTTER: {
-            ssize_t idx;
-            uint32_t frameNumber = msg->message.shutter.frame_number;
-            nsecs_t timestamp = msg->message.shutter.timestamp;
-            // Verify ordering of shutter notifications
-            {
-                Mutex::Autolock l(mOutputLock);
-                // TODO: need to track errors for tighter bounds on expected frame number.
-                if (frameNumber < mNextShutterFrameNumber) {
-                    SET_ERR("Shutter notification out-of-order. Expected "
-                            "notification for frame %d, got frame %d",
-                            mNextShutterFrameNumber, frameNumber);
-                    break;
-                }
-                mNextShutterFrameNumber = frameNumber + 1;
-            }
-
-            CaptureResultExtras resultExtras;
-
-            // Set timestamp for the request in the in-flight tracking
-            // and get the request ID to send upstream
-            {
-                Mutex::Autolock l(mInFlightLock);
-                idx = mInFlightMap.indexOfKey(frameNumber);
-                if (idx >= 0) {
-                    InFlightRequest &r = mInFlightMap.editValueAt(idx);
-                    r.captureTimestamp = timestamp;
-                    resultExtras = r.resultExtras;
-                }
-            }
-            if (idx < 0) {
-                SET_ERR("Shutter notification for non-existent frame number %d",
-                        frameNumber);
-                break;
-            }
-            ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64,
-                    mId, __FUNCTION__, frameNumber, resultExtras.requestId, timestamp);
-            // Call listener, if any
-            if (listener != NULL) {
-                listener->notifyShutter(resultExtras, timestamp);
-            }
+            notifyShutter(msg->message.shutter, listener);
             break;
         }
         default:
@@ -2099,6 +2157,121 @@
     }
 }
 
+void Camera3Device::notifyError(const camera3_error_msg_t &msg,
+        NotificationListener *listener) {
+
+    // Map camera HAL error codes to ICameraDeviceCallback error codes
+    // Index into this with the HAL error code
+    static const ICameraDeviceCallbacks::CameraErrorCode
+            halErrorMap[CAMERA3_MSG_NUM_ERRORS] = {
+        // 0 = Unused error code
+        ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR,
+        // 1 = CAMERA3_MSG_ERROR_DEVICE
+        ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
+        // 2 = CAMERA3_MSG_ERROR_REQUEST
+        ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+        // 3 = CAMERA3_MSG_ERROR_RESULT
+        ICameraDeviceCallbacks::ERROR_CAMERA_RESULT,
+        // 4 = CAMERA3_MSG_ERROR_BUFFER
+        ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER
+    };
+
+    ICameraDeviceCallbacks::CameraErrorCode errorCode =
+            ((msg.error_code >= 0) &&
+                    (msg.error_code < CAMERA3_MSG_NUM_ERRORS)) ?
+            halErrorMap[msg.error_code] :
+            ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR;
+
+    int streamId = 0;
+    if (msg.error_stream != NULL) {
+        Camera3Stream *stream =
+                Camera3Stream::cast(msg.error_stream);
+        streamId = stream->getId();
+    }
+    ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
+            mId, __FUNCTION__, msg.frame_number,
+            streamId, msg.error_code);
+
+    CaptureResultExtras resultExtras;
+    switch (errorCode) {
+        case ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
+            // SET_ERR calls notifyError
+            SET_ERR("Camera HAL reported serious device error");
+            break;
+        case ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
+        case ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+        case ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
+            {
+                Mutex::Autolock l(mInFlightLock);
+                ssize_t idx = mInFlightMap.indexOfKey(msg.frame_number);
+                if (idx >= 0) {
+                    InFlightRequest &r = mInFlightMap.editValueAt(idx);
+                    r.requestStatus = msg.error_code;
+                    resultExtras = r.resultExtras;
+                } else {
+                    resultExtras.frameNumber = msg.frame_number;
+                    ALOGE("Camera %d: %s: cannot find in-flight request on "
+                            "frame %" PRId64 " error", mId, __FUNCTION__,
+                            resultExtras.frameNumber);
+                }
+            }
+            if (listener != NULL) {
+                listener->notifyError(errorCode, resultExtras);
+            } else {
+                ALOGE("Camera %d: %s: no listener available", mId, __FUNCTION__);
+            }
+            break;
+        default:
+            // SET_ERR calls notifyError
+            SET_ERR("Unknown error message from HAL: %d", msg.error_code);
+            break;
+    }
+}
+
+void Camera3Device::notifyShutter(const camera3_shutter_msg_t &msg,
+        NotificationListener *listener) {
+    ssize_t idx;
+    // Verify ordering of shutter notifications
+    {
+        Mutex::Autolock l(mOutputLock);
+        // TODO: need to track errors for tighter bounds on expected frame number.
+        if (msg.frame_number < mNextShutterFrameNumber) {
+            SET_ERR("Shutter notification out-of-order. Expected "
+                    "notification for frame %d, got frame %d",
+                    mNextShutterFrameNumber, msg.frame_number);
+            return;
+        }
+        mNextShutterFrameNumber = msg.frame_number + 1;
+    }
+
+    CaptureResultExtras resultExtras;
+
+    // Set timestamp for the request in the in-flight tracking
+    // and get the request ID to send upstream
+    {
+        Mutex::Autolock l(mInFlightLock);
+        idx = mInFlightMap.indexOfKey(msg.frame_number);
+        if (idx >= 0) {
+            InFlightRequest &r = mInFlightMap.editValueAt(idx);
+            r.captureTimestamp = msg.timestamp;
+            resultExtras = r.resultExtras;
+        }
+    }
+    if (idx < 0) {
+        SET_ERR("Shutter notification for non-existent frame number %d",
+                msg.frame_number);
+        return;
+    }
+    ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64,
+            mId, __FUNCTION__,
+            msg.frame_number, resultExtras.requestId, msg.timestamp);
+    // Call listener, if any
+    if (listener != NULL) {
+        listener->notifyShutter(resultExtras, msg.timestamp);
+    }
+}
+
+
 CameraMetadata Camera3Device::getLatestRequestLocked() {
     ALOGV("%s", __FUNCTION__);
 
@@ -2129,10 +2302,18 @@
         mPaused(true),
         mFrameNumber(0),
         mLatestRequestId(NAME_NOT_FOUND),
+        mCurrentAfTriggerId(0),
+        mCurrentPreCaptureTriggerId(0),
         mRepeatingLastFrameNumber(NO_IN_FLIGHT_REPEATING_FRAMES) {
     mStatusId = statusTracker->addComponent();
 }
 
+void Camera3Device::RequestThread::setNotifyCallback(
+        NotificationListener *listener) {
+    Mutex::Autolock l(mRequestLock);
+    mListener = listener;
+}
+
 void Camera3Device::RequestThread::configurationComplete() {
     Mutex::Autolock l(mRequestLock);
     mReconfigured = true;
@@ -2255,20 +2436,26 @@
     return OK;
 }
 
-status_t Camera3Device::RequestThread::clear(/*out*/int64_t *lastFrameNumber) {
+status_t Camera3Device::RequestThread::clear(
+        NotificationListener *listener,
+        /*out*/int64_t *lastFrameNumber) {
     Mutex::Autolock l(mRequestLock);
     ALOGV("RequestThread::%s:", __FUNCTION__);
+
     mRepeatingRequests.clear();
 
-    // Decrement repeating frame count for those requests never sent to device
-    // TODO: Remove this after we have proper error handling so these requests
-    // will generate an error callback. This might be the only place calling
-    // isRepeatingRequestLocked. If so, isRepeatingRequestLocked should also be removed.
-    const RequestList &requests = mRequestQueue;
-    for (RequestList::const_iterator it = requests.begin();
-            it != requests.end(); ++it) {
-        if (isRepeatingRequestLocked(*it)) {
-            mRepeatingLastFrameNumber--;
+    // Send errors for all requests pending in the request queue, including
+    // pending repeating requests
+    if (listener != NULL) {
+        for (RequestList::iterator it = mRequestQueue.begin();
+                 it != mRequestQueue.end(); ++it) {
+            // Set the frame number this request would have had, if it
+            // had been submitted; this frame number will not be reused.
+            // The requestId and burstId fields were set when the request was
+            // submitted originally (in convertMetadataListToRequestListLocked)
+            (*it)->mResultExtras.frameNumber = mFrameNumber++;
+            listener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+                    (*it)->mResultExtras);
         }
     }
     mRequestQueue.clear();
@@ -2410,8 +2597,17 @@
         request.input_buffer = &inputBuffer;
         res = nextRequest->mInputStream->getInputBuffer(&inputBuffer);
         if (res != OK) {
+            // Can't get input buffer from gralloc queue - this could be due to
+            // disconnected queue or other producer misbehavior, so not a fatal
+            // error
             ALOGE("RequestThread: Can't get input buffer, skipping request:"
                     " %s (%d)", strerror(-res), res);
+            Mutex::Autolock l(mRequestLock);
+            if (mListener != NULL) {
+                mListener->notifyError(
+                        ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+                        nextRequest->mResultExtras);
+            }
             cleanUpFailedRequest(request, nextRequest, outputBuffers);
             return true;
         }
@@ -2427,8 +2623,17 @@
         res = nextRequest->mOutputStreams.editItemAt(i)->
                 getBuffer(&outputBuffers.editItemAt(i));
         if (res != OK) {
+            // Can't get output buffer from gralloc queue - this could be due to
+            // abandoned queue or other consumer misbehavior, so not a fatal
+            // error
             ALOGE("RequestThread: Can't get output buffer, skipping request:"
                     " %s (%d)", strerror(-res), res);
+            Mutex::Autolock l(mRequestLock);
+            if (mListener != NULL) {
+                mListener->notifyError(
+                        ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+                        nextRequest->mResultExtras);
+            }
             cleanUpFailedRequest(request, nextRequest, outputBuffers);
             return true;
         }
@@ -2439,6 +2644,7 @@
     // Log request in the in-flight queue
     sp<Camera3Device> parent = mParent.promote();
     if (parent == NULL) {
+        // Should not happen, and nowhere to send errors to, so just log it
         CLOGE("RequestThread: Parent is gone");
         cleanUpFailedRequest(request, nextRequest, outputBuffers);
         return false;
@@ -2474,6 +2680,9 @@
     ATRACE_END();
 
     if (res != OK) {
+        // Should only get a failure here for malformed requests or device-level
+        // errors, so consider all errors fatal.  Bad metadata failures should
+        // come through notify.
         SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
                 " device: %s (%d)", request.frame_number, strerror(-res), res);
         cleanUpFailedRequest(request, nextRequest, outputBuffers);
@@ -2611,6 +2820,8 @@
 
     if (nextRequest != NULL) {
         nextRequest->mResultExtras.frameNumber = mFrameNumber++;
+        nextRequest->mResultExtras.afTriggerId = mCurrentAfTriggerId;
+        nextRequest->mResultExtras.precaptureTriggerId = mCurrentPreCaptureTriggerId;
     }
     return nextRequest;
 }
@@ -2690,8 +2901,13 @@
         if (tag == ANDROID_CONTROL_AF_TRIGGER_ID || tag == ANDROID_CONTROL_AE_PRECAPTURE_ID) {
             bool isAeTrigger = (trigger.metadataTag == ANDROID_CONTROL_AE_PRECAPTURE_ID);
             uint32_t triggerId = static_cast<uint32_t>(trigger.entryValue);
-            isAeTrigger ? request->mResultExtras.precaptureTriggerId = triggerId :
-                          request->mResultExtras.afTriggerId = triggerId;
+            if (isAeTrigger) {
+                request->mResultExtras.precaptureTriggerId = triggerId;
+                mCurrentPreCaptureTriggerId = triggerId;
+            } else {
+                request->mResultExtras.afTriggerId = triggerId;
+                mCurrentAfTriggerId = triggerId;
+            }
             if (parent->mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
                 continue; // Trigger ID tag is deprecated since device HAL 3.2
             }
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 7656237..b99ed7e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -114,6 +114,8 @@
     virtual status_t deleteStream(int id);
     virtual status_t deleteReprocessStream(int id);
 
+    virtual status_t configureStreams();
+
     virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
 
     // Transitions to the idle state on success
@@ -149,6 +151,8 @@
     struct                     RequestTrigger;
     // minimal jpeg buffer size: 256KB + blob header
     static const ssize_t       kMinJpegBufferSize = 256 * 1024 + sizeof(camera3_jpeg_blob);
+    // Constant to use for stream ID when one doesn't exist
+    static const int           NO_STREAM = -1;
 
     // A lock to enforce serialization on the input/configure side
     // of the public interface.
@@ -194,6 +198,8 @@
     int                        mNextStreamId;
     bool                       mNeedConfig;
 
+    int                        mDummyStreamId;
+
     // Whether to send state updates upstream
     // Pause when doing transparent reconfiguration
     bool                       mPauseStateNotify;
@@ -289,6 +295,17 @@
     status_t           configureStreamsLocked();
 
     /**
+     * Add a dummy stream to the current stream set as a workaround for
+     * not allowing 0 streams in the camera HAL spec.
+     */
+    status_t           addDummyStreamLocked();
+
+    /**
+     * Remove a dummy stream if the current config includes real streams.
+     */
+    status_t           tryRemoveDummyStreamLocked();
+
+    /**
      * Set device into an error state due to some fatal failure, and set an
      * error message to indicate why. Only the first call's message will be
      * used. The message is also sent to the log.
@@ -344,6 +361,8 @@
                 sp<camera3::StatusTracker> statusTracker,
                 camera3_device_t *hal3Device);
 
+        void     setNotifyCallback(NotificationListener *listener);
+
         /**
          * Call after stream (re)-configuration is completed.
          */
@@ -367,7 +386,8 @@
         /**
          * Remove all queued and repeating requests, and pending triggers
          */
-        status_t clear(/*out*/
+        status_t clear(NotificationListener *listener,
+                       /*out*/
                        int64_t *lastFrameNumber = NULL);
 
         /**
@@ -450,6 +470,8 @@
         wp<camera3::StatusTracker>  mStatusTracker;
         camera3_device_t  *mHal3Device;
 
+        NotificationListener *mListener;
+
         const int          mId;       // The camera ID
         int                mStatusId; // The RequestThread's component ID for
                                       // status tracking
@@ -484,6 +506,8 @@
         TriggerMap         mTriggerMap;
         TriggerMap         mTriggerRemovedMap;
         TriggerMap         mTriggerReplacedMap;
+        uint32_t           mCurrentAfTriggerId;
+        uint32_t           mCurrentPreCaptureTriggerId;
 
         int64_t            mRepeatingLastFrameNumber;
     };
@@ -607,6 +631,12 @@
 
     void notify(const camera3_notify_msg *msg);
 
+    // Specific notify handlers
+    void notifyError(const camera3_error_msg_t &msg,
+            NotificationListener *listener);
+    void notifyShutter(const camera3_shutter_msg_t &msg,
+            NotificationListener *listener);
+
     /**
      * Static callback forwarding methods from HAL to instance
      */
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
new file mode 100644
index 0000000..6656b09
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DummyStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "Camera3DummyStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3DummyStream::Camera3DummyStream(int id) :
+        Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, DUMMY_WIDTH, DUMMY_HEIGHT,
+                /*maxSize*/0, DUMMY_FORMAT) {
+
+}
+
+Camera3DummyStream::~Camera3DummyStream() {
+
+}
+
+status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *buffer) {
+    ATRACE_CALL();
+    ALOGE("%s: Stream %d: Dummy stream cannot produce buffers!", __FUNCTION__, mId);
+    return INVALID_OPERATION;
+}
+
+status_t Camera3DummyStream::returnBufferLocked(
+        const camera3_stream_buffer &buffer,
+        nsecs_t timestamp) {
+    ATRACE_CALL();
+    ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", __FUNCTION__, mId);
+    return INVALID_OPERATION;
+}
+
+status_t Camera3DummyStream::returnBufferCheckedLocked(
+            const camera3_stream_buffer &buffer,
+            nsecs_t timestamp,
+            bool output,
+            /*out*/
+            sp<Fence> *releaseFenceOut) {
+    ATRACE_CALL();
+    ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", __FUNCTION__, mId);
+    return INVALID_OPERATION;
+}
+
+void Camera3DummyStream::dump(int fd, const Vector<String16> &args) const {
+    (void) args;
+    String8 lines;
+    lines.appendFormat("    Stream[%d]: Dummy\n", mId);
+    write(fd, lines.string(), lines.size());
+
+    Camera3IOStreamBase::dump(fd, args);
+}
+
+status_t Camera3DummyStream::setTransform(int transform) {
+    ATRACE_CALL();
+    // Do nothing
+    return OK;
+}
+
+status_t Camera3DummyStream::configureQueueLocked() {
+    // Do nothing
+    return OK;
+}
+
+status_t Camera3DummyStream::disconnectLocked() {
+    mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
+                                           : STATE_CONSTRUCTED;
+    return OK;
+}
+
+status_t Camera3DummyStream::getEndpointUsage(uint32_t *usage) {
+    *usage = DUMMY_USAGE;
+    return OK;
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
new file mode 100644
index 0000000..3e42623
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
+
+#include <utils/RefBase.h>
+#include <gui/Surface.h>
+
+#include "Camera3Stream.h"
+#include "Camera3IOStreamBase.h"
+#include "Camera3OutputStreamInterface.h"
+
+namespace android {
+namespace camera3 {
+
+/**
+ * A dummy output stream class, to be used as a placeholder when no valid
+ * streams are configured by the client.
+ * This is necessary because camera HAL v3.2 or older disallow configuring
+ * 0 output streams, while the public camera2 API allows for it.
+ */
+class Camera3DummyStream :
+        public Camera3IOStreamBase,
+        public Camera3OutputStreamInterface {
+
+  public:
+    /**
+     * Set up a dummy stream; doesn't actually connect to anything, and uses
+     * a default dummy format and size.
+     */
+    Camera3DummyStream(int id);
+
+    virtual ~Camera3DummyStream();
+
+    /**
+     * Camera3Stream interface
+     */
+
+    virtual void     dump(int fd, const Vector<String16> &args) const;
+
+    status_t         setTransform(int transform);
+
+  protected:
+
+    /**
+     * Note that we release the lock briefly in this function
+     */
+    virtual status_t returnBufferCheckedLocked(
+            const camera3_stream_buffer &buffer,
+            nsecs_t timestamp,
+            bool output,
+            /*out*/
+            sp<Fence> *releaseFenceOut);
+
+    virtual status_t disconnectLocked();
+
+  private:
+
+    // Default dummy parameters; 320x240 is a required size for all devices,
+    // otherwise act like a SurfaceView would.
+    static const int DUMMY_WIDTH = 320;
+    static const int DUMMY_HEIGHT = 240;
+    static const int DUMMY_FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    static const uint32_t DUMMY_USAGE = GRALLOC_USAGE_HW_COMPOSER;
+
+    /**
+     * Internal Camera3Stream interface
+     */
+    virtual status_t getBufferLocked(camera3_stream_buffer *buffer);
+    virtual status_t returnBufferLocked(
+            const camera3_stream_buffer &buffer,
+            nsecs_t timestamp);
+
+    virtual status_t configureQueueLocked();
+
+    virtual status_t getEndpointUsage(uint32_t *usage);
+
+}; // class Camera3DummyStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 50a2c10..cc66459 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -184,15 +184,6 @@
         return INVALID_OPERATION;
     }
 
-    // Only limit dequeue amount when fully configured
-    if (mState == STATE_CONFIGURED &&
-            mHandoutTotalBufferCount == camera3_stream::max_buffers) {
-        ALOGE("%s: Stream %d: Already dequeued maximum number of simultaneous"
-                " buffers (%d)", __FUNCTION__, mId,
-                camera3_stream::max_buffers);
-        return INVALID_OPERATION;
-    }
-
     return OK;
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index d7b1871..29ce38c 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -209,6 +209,35 @@
     return res;
 }
 
+status_t Camera3Stream::cancelConfiguration() {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+    switch (mState) {
+        case STATE_ERROR:
+            ALOGE("%s: In error state", __FUNCTION__);
+            return INVALID_OPERATION;
+        case STATE_IN_CONFIG:
+        case STATE_IN_RECONFIG:
+            // OK
+            break;
+        case STATE_CONSTRUCTED:
+        case STATE_CONFIGURED:
+            ALOGE("%s: Cannot cancel configuration that hasn't been started",
+                    __FUNCTION__);
+            return INVALID_OPERATION;
+        default:
+            ALOGE("%s: Unknown state", __FUNCTION__);
+            return INVALID_OPERATION;
+    }
+
+    camera3_stream::usage = oldUsage;
+    camera3_stream::max_buffers = oldMaxBuffers;
+
+    mState = STATE_CONSTRUCTED;
+
+    return OK;
+}
+
 status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) {
     ATRACE_CALL();
     Mutex::Autolock l(mLock);
@@ -381,18 +410,7 @@
     if (hal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_2) {
         ALOGV("%s: register_stream_buffers unused as of HAL3.2", __FUNCTION__);
 
-        /**
-         * Skip the NULL check if camera.dev.register_stream is 1.
-         *
-         * For development-validation purposes only.
-         *
-         * TODO: Remove the property check before shipping L (b/13914251).
-         */
-        char value[PROPERTY_VALUE_MAX] = { '\0', };
-        property_get("camera.dev.register_stream", value, "0");
-        int propInt = atoi(value);
-
-        if (propInt == 0 && hal3Device->ops->register_stream_buffers != NULL) {
+        if (hal3Device->ops->register_stream_buffers != NULL) {
             ALOGE("%s: register_stream_buffers is deprecated in HAL3.2; "
                     "must be set to NULL in camera3_device::ops", __FUNCTION__);
             return INVALID_OPERATION;
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index a77f27c..d0e1337 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -159,6 +159,13 @@
     status_t         finishConfiguration(camera3_device *hal3Device);
 
     /**
+     * Cancels the stream configuration process. This returns the stream to the
+     * initial state, allowing it to be configured again later.
+     * This is done if the HAL rejects the proposed combined stream configuration.
+     */
+    status_t         cancelConfiguration();
+
+    /**
      * Fill in the camera3_stream_buffer with the next valid buffer for this
      * stream, to hand over to the HAL.
      *
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index c93ae15..da989cd 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -82,6 +82,13 @@
     virtual status_t finishConfiguration(camera3_device *hal3Device) = 0;
 
     /**
+     * Cancels the stream configuration process. This returns the stream to the
+     * initial state, allowing it to be configured again later.
+     * This is done if the HAL rejects the proposed combined stream configuration.
+     */
+    virtual status_t cancelConfiguration() = 0;
+
+    /**
      * Fill in the camera3_stream_buffer with the next valid buffer for this
      * stream, to hand over to the HAL.
      *
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 2502e0d..b5aaee3 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -249,7 +249,7 @@
         event->data_offset = sizeof(struct sound_trigger_recognition_event);
         break;
     default:
-            return eventMemory;
+        return eventMemory;
     }
 
     size_t size = event->data_offset + event->data_size;
@@ -653,7 +653,6 @@
 {
     ALOGV("onCallbackEvent type %d", event->mType);
 
-    AutoMutex lock(mLock);
     sp<IMemory> eventMemory = event->mMemory;
 
     if (eventMemory == 0 || eventMemory->pointer() == NULL) {
@@ -668,34 +667,53 @@
     case CallbackEvent::TYPE_RECOGNITION: {
         struct sound_trigger_recognition_event *recognitionEvent =
                 (struct sound_trigger_recognition_event *)eventMemory->pointer();
+        sp<ISoundTriggerClient> client;
+        {
+            AutoMutex lock(mLock);
+            sp<Model> model = getModel(recognitionEvent->model);
+            if (model == 0) {
+                ALOGW("%s model == 0", __func__);
+                return;
+            }
+            if (model->mState != Model::STATE_ACTIVE) {
+                ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
+                return;
+            }
 
-        sp<Model> model = getModel(recognitionEvent->model);
-        if (model == 0) {
-            ALOGW("%s model == 0", __func__);
-            return;
+            recognitionEvent->capture_session = model->mCaptureSession;
+            model->mState = Model::STATE_IDLE;
+            client = mClient;
         }
-        if (model->mState != Model::STATE_ACTIVE) {
-            ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
-            return;
+        if (client != 0) {
+            client->onRecognitionEvent(eventMemory);
         }
-
-        recognitionEvent->capture_session = model->mCaptureSession;
-        mClient->onRecognitionEvent(eventMemory);
-        model->mState = Model::STATE_IDLE;
     } break;
     case CallbackEvent::TYPE_SOUNDMODEL: {
         struct sound_trigger_model_event *soundmodelEvent =
                 (struct sound_trigger_model_event *)eventMemory->pointer();
-
-        sp<Model> model = getModel(soundmodelEvent->model);
-        if (model == 0) {
-            ALOGW("%s model == 0", __func__);
-            return;
+        sp<ISoundTriggerClient> client;
+        {
+            AutoMutex lock(mLock);
+            sp<Model> model = getModel(soundmodelEvent->model);
+            if (model == 0) {
+                ALOGW("%s model == 0", __func__);
+                return;
+            }
+            client = mClient;
         }
-        mClient->onSoundModelEvent(eventMemory);
+        if (client != 0) {
+            client->onSoundModelEvent(eventMemory);
+        }
     } break;
     case CallbackEvent::TYPE_SERVICE_STATE: {
-        mClient->onServiceStateChange(eventMemory);
+        sp<ISoundTriggerClient> client;
+        {
+            AutoMutex lock(mLock);
+            client = mClient;
+        }
+        if (client != 0) {
+            client->onServiceStateChange(eventMemory);
+        }
     } break;
     default:
         LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);