Merge "PlaylistFetcher: avoid repeated fetch when we run off the edge of live playlists" into lmp-dev
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index a3cc396..72e51f9 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -430,7 +430,7 @@
      *  - NO_ERROR: successful operation
      *  - BAD_VALUE:  position is NULL
      */
-            status_t    getPosition(uint32_t *position) const;
+            status_t    getPosition(uint32_t *position);
 
     /* For static buffer mode only, this returns the current playback position in frames
      * relative to start of buffer.  It is analogous to the position units used by
@@ -581,6 +581,7 @@
      * if you need a high resolution mapping between frame position and presentation time,
      * consider implementing that at application level, based on the low resolution timestamps.
      * Returns NO_ERROR if timestamp is valid.
+     * The timestamp parameter is undefined on return if status is not NO_ERROR.
      */
             status_t    getTimestamp(AudioTimestamp& timestamp);
 
@@ -639,7 +640,7 @@
 
             // caller must hold lock on mLock for all _l methods
 
-            status_t createTrack_l(size_t epoch);
+            status_t createTrack_l();
 
             // can only be called when mState != STATE_ACTIVE
             void flush_l();
@@ -659,6 +660,9 @@
             bool     isDirect_l() const
                 { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
 
+            // increment mPosition by the delta of mServer, and return the new value of mPosition
+            uint32_t updateAndGetPosition_l();
+
     // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
@@ -731,6 +735,18 @@
     bool                    mMarkerReached;
     uint32_t                mNewPosition;           // in frames
     uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
+    uint32_t                mServer;                // in frames, last known mProxy->getPosition()
+                                                    // which is count of frames consumed by server,
+                                                    // reset by new IAudioTrack,
+                                                    // whether it is reset by stop() is TBD
+    uint32_t                mPosition;              // in frames, like mServer except continues
+                                                    // monotonically after new IAudioTrack,
+                                                    // and could be easily widened to uint64_t
+    uint32_t                mReleased;              // in frames, count of frames released to server
+                                                    // but not necessarily consumed by server,
+                                                    // reset by stop() but continues monotonically
+                                                    // after new IAudioTrack to restore mPosition,
+                                                    // and could be easily widened to uint64_t
 
     audio_output_flags_t    mFlags;
         // const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD.
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index 5c8a484..619ac78 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -88,7 +88,7 @@
     /* Send parameters to the audio hardware */
     virtual status_t    setParameters(const String8& keyValuePairs) = 0;
 
-    /* Return NO_ERROR if timestamp is valid */
+    /* Return NO_ERROR if timestamp is valid.  timestamp is undefined otherwise. */
     virtual status_t    getTimestamp(AudioTimestamp& timestamp) = 0;
 
     /* Signal the playback thread for a change in control block */
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index 253c557..f061d22 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -54,7 +54,8 @@
     CAMCORDER_QUALITY_HIGH_SPEED_480P = 2002,
     CAMCORDER_QUALITY_HIGH_SPEED_720P = 2003,
     CAMCORDER_QUALITY_HIGH_SPEED_1080P = 2004,
-    CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2004,
+    CAMCORDER_QUALITY_HIGH_SPEED_2160P = 2005,
+    CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2005,
 };
 
 /**
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index be0c15b..d422576 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -227,7 +227,7 @@
 
     // Returns NO_ERROR if a timestamp is available.  The timestamp includes the total number
     // of frames presented to an external observer, together with the value of CLOCK_MONOTONIC
-    // as of this presentation count.
+    // as of this presentation count.  The timestamp parameter is undefined if error is returned.
     virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; }
 
 protected:
diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h
index 7540e07..2e663ec 100644
--- a/include/media/stagefright/MediaErrors.h
+++ b/include/media/stagefright/MediaErrors.h
@@ -58,20 +58,22 @@
     // drm/drm_framework_common.h
     DRM_ERROR_BASE = -2000,
 
-    ERROR_DRM_UNKNOWN                       = DRM_ERROR_BASE,
-    ERROR_DRM_NO_LICENSE                    = DRM_ERROR_BASE - 1,
-    ERROR_DRM_LICENSE_EXPIRED               = DRM_ERROR_BASE - 2,
-    ERROR_DRM_SESSION_NOT_OPENED            = DRM_ERROR_BASE - 3,
-    ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED  = DRM_ERROR_BASE - 4,
-    ERROR_DRM_DECRYPT                       = DRM_ERROR_BASE - 5,
-    ERROR_DRM_CANNOT_HANDLE                 = DRM_ERROR_BASE - 6,
-    ERROR_DRM_TAMPER_DETECTED               = DRM_ERROR_BASE - 7,
-    ERROR_DRM_NOT_PROVISIONED               = DRM_ERROR_BASE - 8,
-    ERROR_DRM_DEVICE_REVOKED                = DRM_ERROR_BASE - 9,
-    ERROR_DRM_RESOURCE_BUSY                 = DRM_ERROR_BASE - 10,
+    ERROR_DRM_UNKNOWN                        = DRM_ERROR_BASE,
+    ERROR_DRM_NO_LICENSE                     = DRM_ERROR_BASE - 1,
+    ERROR_DRM_LICENSE_EXPIRED                = DRM_ERROR_BASE - 2,
+    ERROR_DRM_SESSION_NOT_OPENED             = DRM_ERROR_BASE - 3,
+    ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED   = DRM_ERROR_BASE - 4,
+    ERROR_DRM_DECRYPT                        = DRM_ERROR_BASE - 5,
+    ERROR_DRM_CANNOT_HANDLE                  = DRM_ERROR_BASE - 6,
+    ERROR_DRM_TAMPER_DETECTED                = DRM_ERROR_BASE - 7,
+    ERROR_DRM_NOT_PROVISIONED                = DRM_ERROR_BASE - 8,
+    ERROR_DRM_DEVICE_REVOKED                 = DRM_ERROR_BASE - 9,
+    ERROR_DRM_RESOURCE_BUSY                  = DRM_ERROR_BASE - 10,
+    ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION = DRM_ERROR_BASE - 11,
+    ERROR_DRM_LAST_USED_ERRORCODE            = DRM_ERROR_BASE - 11,
 
-    ERROR_DRM_VENDOR_MAX                    = DRM_ERROR_BASE - 500,
-    ERROR_DRM_VENDOR_MIN                    = DRM_ERROR_BASE - 999,
+    ERROR_DRM_VENDOR_MAX                     = DRM_ERROR_BASE - 500,
+    ERROR_DRM_VENDOR_MIN                     = DRM_ERROR_BASE - 999,
 
     // Heartbeat Error Codes
     HEARTBEAT_ERROR_BASE = -3000,
@@ -100,7 +102,7 @@
 
 // returns true if err is a recognized DRM error code
 static inline bool isCryptoError(status_t err) {
-    return (ERROR_DRM_RESOURCE_BUSY <= err && err <= ERROR_DRM_UNKNOWN)
+    return (ERROR_DRM_LAST_USED_ERRORCODE <= err && err <= ERROR_DRM_UNKNOWN)
             || (ERROR_DRM_VENDOR_MIN <= err && err <= ERROR_DRM_VENDOR_MAX);
 }
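
A standalone sanity sketch (not taken from the tree) of the new isCryptoError() bounds: ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION sits at DRM_ERROR_BASE - 11, which the old upper bound ERROR_DRM_RESOURCE_BUSY (DRM_ERROR_BASE - 10) would have excluded; the ERROR_DRM_LAST_USED_ERRORCODE sentinel includes it, and keeps the check correct as long as the sentinel tracks future additions.

    #include <cassert>

    // Constants mirrored from MediaErrors.h above, reduced to what the check needs.
    enum {
        DRM_ERROR_BASE                           = -2000,
        ERROR_DRM_UNKNOWN                        = DRM_ERROR_BASE,
        ERROR_DRM_RESOURCE_BUSY                  = DRM_ERROR_BASE - 10,
        ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION = DRM_ERROR_BASE - 11,
        ERROR_DRM_LAST_USED_ERRORCODE            = DRM_ERROR_BASE - 11,
    };

    int main() {
        int err = ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION;  // -2011
        // Old range check: the new code falls outside it.
        assert(!(ERROR_DRM_RESOURCE_BUSY <= err && err <= ERROR_DRM_UNKNOWN));
        // New range check: the new code is recognized as a DRM error.
        assert(ERROR_DRM_LAST_USED_ERRORCODE <= err && err <= ERROR_DRM_UNKNOWN);
        return 0;
    }
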
 
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index 5846d6b..a9e235b 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -137,7 +137,9 @@
             Rect rectValue;
         } u;
         const char *mName;
+        size_t      mNameLength;
         Type mType;
+        void setName(const char *name, size_t len);
     };
 
     enum {
@@ -147,12 +149,14 @@
     size_t mNumItems;
 
     Item *allocateItem(const char *name);
-    void freeItem(Item *item);
+    void freeItemValue(Item *item);
     const Item *findItem(const char *name, Type type) const;
 
     void setObjectInternal(
             const char *name, const sp<RefBase> &obj, Type type);
 
+    size_t findItemIndex(const char *name, size_t len) const;
+
     DISALLOW_EVIL_CONSTRUCTORS(AMessage);
 };
 
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 3486d21..1742fbe 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -582,9 +582,13 @@
         }
         binder->linkToDeath(gAudioPolicyServiceClient);
         gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
-        gAudioPolicyService->registerClient(gAudioPolicyServiceClient);
         gLock.unlock();
+        // Registering the client takes the AudioPolicyService lock.
+        // Don't hold the AudioSystem lock at the same time.
+        gAudioPolicyService->registerClient(gAudioPolicyServiceClient);
     } else {
+        // There exists a benign race condition where gAudioPolicyService
+        // is set, but gAudioPolicyServiceClient is not yet registered.
         gLock.unlock();
     }
     return gAudioPolicyService;
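
The reordering above exists because IAudioPolicyService::registerClient() takes the AudioPolicyService lock; calling it while still holding AudioSystem's gLock sets up a lock-order inversion with paths that take those locks in the opposite order. A generic sketch of the pattern (std::mutex and hypothetical names, not the AudioSystem code):

    #include <mutex>

    std::mutex gLocalLock;      // plays the role of AudioSystem's gLock
    std::mutex gRemoteLock;     // plays the role of the AudioPolicyService lock

    void registerClientWithRemote() {
        // The remote service takes its own lock internally.
        std::lock_guard<std::mutex> l(gRemoteLock);
        // ... record the client ...
    }

    void getServiceAndRegister() {
        gLocalLock.lock();
        // ... look up or create the cached service handle under the local lock ...
        gLocalLock.unlock();        // drop the local lock first
        registerClientWithRemote(); // then call out, so the two locks are never held together
    }
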
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index d87e6f5..ff7da83 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -398,7 +398,7 @@
     }
 
     // create the IAudioTrack
-    status = createTrack_l(0 /*epoch*/);
+    status = createTrack_l();
 
     if (status != NO_ERROR) {
         if (mAudioTrackThread != 0) {
@@ -417,6 +417,9 @@
     mMarkerReached = false;
     mNewPosition = 0;
     mUpdatePeriod = 0;
+    mServer = 0;
+    mPosition = 0;
+    mReleased = 0;
     AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
     mSequence = 1;
     mObservedSequence = mSequence;
@@ -443,14 +446,16 @@
     } else {
         mState = STATE_ACTIVE;
     }
+    (void) updateAndGetPosition_l();
     if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
         // reset current position as seen by client to 0
-        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
+        mPosition = 0;
+        mReleased = 0;
         // force refresh of remaining frames by processAudioBuffer() as last
         // write before stop could be partial.
         mRefreshRemaining = true;
     }
-    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    mNewPosition = mPosition + mUpdatePeriod;
     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
 
     sp<AudioTrackThread> t = mAudioTrackThread;
@@ -709,7 +714,7 @@
 {
     // FIXME If setting a loop also sets position to start of loop, then
     //       this is correct.  Otherwise it should be removed.
-    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
     mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
     mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
 }
@@ -751,7 +756,7 @@
     }
 
     AutoMutex lock(mLock);
-    mNewPosition = mProxy->getPosition() + updatePeriod;
+    mNewPosition = updateAndGetPosition_l() + updatePeriod;
     mUpdatePeriod = updatePeriod;
 
     return NO_ERROR;
@@ -791,7 +796,7 @@
     if (mState == STATE_ACTIVE) {
         return INVALID_OPERATION;
     }
-    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
     mLoopPeriod = 0;
     // FIXME Check whether loops and setting position are incompatible in old code.
     // If we use setLoop for both purposes we lose the capability to set the position while looping.
@@ -800,7 +805,7 @@
     return NO_ERROR;
 }
 
-status_t AudioTrack::getPosition(uint32_t *position) const
+status_t AudioTrack::getPosition(uint32_t *position)
 {
     if (position == NULL) {
         return BAD_VALUE;
@@ -823,8 +828,8 @@
         *position = dspFrames;
     } else {
         // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
-        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
-                mProxy->getPosition();
+        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
+                0 : updateAndGetPosition_l();
     }
     return NO_ERROR;
 }
@@ -881,7 +886,7 @@
 // -------------------------------------------------------------------------
 
 // must be called with mLock held
-status_t AudioTrack::createTrack_l(size_t epoch)
+status_t AudioTrack::createTrack_l()
 {
     status_t status;
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
@@ -1184,7 +1189,6 @@
     mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
     mProxy->setSendLevel(mSendLevel);
     mProxy->setSampleRate(mSampleRate);
-    mProxy->setEpoch(epoch);
     mProxy->setMinimum(mNotificationFramesAct);
 
     mDeathNotifier = new DeathNotifier(this);
@@ -1319,6 +1323,7 @@
     buffer.mRaw = audioBuffer->raw;
 
     AutoMutex lock(mLock);
+    mReleased += stepCount;
     mInUnderrun = false;
     mProxy->releaseBuffer(&buffer);
 
@@ -1531,7 +1536,7 @@
     }
 
     // Get current position of server
-    size_t position = mProxy->getPosition();
+    size_t position = updateAndGetPosition_l();
 
     // Manage marker callback
     bool markerReached = false;
@@ -1796,14 +1801,18 @@
         return DEAD_OBJECT;
     }
 
-    // if the new IAudioTrack is created, createTrack_l() will modify the
+    // save the old static buffer position
+    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
+
+    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
     // following member variables: mAudioTrack, mCblkMemory and mCblk.
-    // It will also delete the strong references on previous IAudioTrack and IMemory
+    // It will also delete the strong references on previous IAudioTrack and IMemory.
+    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
+    result = createTrack_l();
 
     // take the frames that will be lost by track recreation into account in saved position
-    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
-    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
-    result = createTrack_l(position /*epoch*/);
+    (void) updateAndGetPosition_l();
+    mPosition = mReleased;
 
     if (result == NO_ERROR) {
         // continue playback from last known position, but
@@ -1838,6 +1847,27 @@
     return result;
 }
 
+uint32_t AudioTrack::updateAndGetPosition_l()
+{
+    // This is the sole place to read server consumed frames
+    uint32_t newServer = mProxy->getPosition();
+    int32_t delta = newServer - mServer;
+    mServer = newServer;
+    // TODO There is controversy about whether there can be "negative jitter" in server position.
+    //      This should be investigated further, and if possible, it should be addressed.
+    //      A more definite failure mode is infrequent polling by client.
+    //      One could call (void) updateAndGetPosition_l() in releaseBuffer(),
+    //      so mReleased and mPosition are always lock-step as best possible.
+    //      That should ensure delta never goes negative for infrequent polling
+    //      unless the server has more than 2^31 frames in its buffer,
+    //      in which case the use of uint32_t for these counters has bigger issues.
+    if (delta < 0) {
+        ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
+        delta = 0;
+    }
+    return mPosition += (uint32_t) delta;
+}
+
 status_t AudioTrack::setParameters(const String8& keyValuePairs)
 {
     AutoMutex lock(mLock);
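
A standalone worked example of why the unsigned subtraction followed by the cast to int32_t in updateAndGetPosition_l() remains correct when the 32-bit server counter wraps, and what the retrograde clamp does (values chosen for illustration):

    #include <cassert>
    #include <cstdint>

    int main() {
        uint32_t mServer   = 0xFFFFFFF0u;   // server count just before the 32-bit wrap
        uint32_t mPosition = 1000;          // client-continuous position
        uint32_t newServer = 0x00000010u;   // server consumed 0x20 more frames and wrapped

        int32_t delta = newServer - mServer;  // modular subtraction, then reinterpreted as signed
        assert(delta == 0x20);                // 32 frames, despite the wraparound

        if (delta < 0) {                      // "retrograde motion" from the server is clamped
            delta = 0;
        }
        mPosition += (uint32_t) delta;
        assert(mPosition == 1032);
        return 0;
    }
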
@@ -1854,9 +1884,34 @@
     if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
         return INVALID_OPERATION;
     }
+    // The presented frame count must always lag behind the consumed frame count.
+    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
     status_t status = mAudioTrack->getTimestamp(timestamp);
     if (status == NO_ERROR) {
-        timestamp.mPosition += mProxy->getEpoch();
+        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
+        (void) updateAndGetPosition_l();
+        // Server consumed (mServer) and presented both use the same server time base,
+        // and server consumed is always >= presented.
+        // The delta between these represents the number of frames in the buffer pipeline.
+        // If this delta between these is greater than the client position, it means that
+        // actually presented is still stuck at the starting line (figuratively speaking),
+        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
+        if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
+            return INVALID_OPERATION;
+        }
+        // Convert timestamp position from server time base to client time base.
+        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
+        // But if we change it to 64-bit then this could fail.
+        // If (mPosition - mServer) can be negative then should use:
+        //   (int32_t)(mPosition - mServer)
+        timestamp.mPosition += mPosition - mServer;
+        // Immediately after a call to updateAndGetPosition_l(), mPosition and
+        // mServer both represent the same frame position.  mPosition is
+        // from the client's point of view, and mServer is from the server's
+        // point of view.  So the difference between them is the "fudge factor"
+        // between the client and server views, due to stop() and/or a new
+        // IAudioTrack.  And timestamp.mPosition is initially in the server's
+        // point of view, so we need to apply the same fudge factor to it.
     }
     return status;
 }
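
The getTimestamp() change converts a presentation position reported against the server's frame count into the client-continuous count maintained by updateAndGetPosition_l(). A small standalone sketch (hypothetical helper, simplified signature) of that conversion and of the "nothing presented yet" guard:

    #include <cstdint>
    #include <cstdio>

    // Returns false while no frame written by the client has been presented yet.
    static bool toClientBase(uint32_t serverPresented,   // timestamp.mPosition from the server
                             uint32_t serverConsumed,    // mServer, just refreshed
                             uint32_t clientConsumed,    // mPosition, client time base
                             uint32_t *clientPresented) {
        // Consumed >= presented on the server side, so the difference is the pipeline depth.
        // If it exceeds the client position, presentation has not reached the client's frame 0.
        if (serverConsumed - serverPresented > clientConsumed) {
            return false;   // AudioTrack returns INVALID_OPERATION here
        }
        // Apply the client/server offset accumulated across stop() and track re-creation.
        *clientPresented = serverPresented + (clientConsumed - serverConsumed);
        return true;
    }

    int main() {
        // Server consumed 5000 frames since the IAudioTrack was created and presented 4000;
        // the client wrote 3000 frames since the last stop(), so the client base lags by 2000.
        uint32_t pos;
        if (toClientBase(4000, 5000, 3000, &pos)) {
            printf("presented position, client time base: %u\n", pos);   // 2000
        }
        return 0;
    }
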
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index d2e181b..e2e6042 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -87,6 +87,7 @@
     {"highspeed480p", CAMCORDER_QUALITY_HIGH_SPEED_480P},
     {"highspeed720p", CAMCORDER_QUALITY_HIGH_SPEED_720P},
     {"highspeed1080p", CAMCORDER_QUALITY_HIGH_SPEED_1080P},
+    {"highspeed2160p", CAMCORDER_QUALITY_HIGH_SPEED_2160P},
 };
 
 #if LOG_NDEBUG
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index dacb144..3e0fc0d 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -60,7 +60,7 @@
     return OK;
 }
 
-player_type MediaPlayerFactory::getDefaultPlayerType() {
+static player_type getDefaultPlayerType() {
     char value[PROPERTY_VALUE_MAX];
     if (property_get("media.stagefright.use-awesome", value, NULL)
             && (!strcmp("1", value) || !strcasecmp("true", value))) {
@@ -181,16 +181,19 @@
                                int64_t offset,
                                int64_t /*length*/,
                                float /*curScore*/) {
-        char buf[20];
-        lseek(fd, offset, SEEK_SET);
-        read(fd, buf, sizeof(buf));
-        lseek(fd, offset, SEEK_SET);
+        if (getDefaultPlayerType()
+                == STAGEFRIGHT_PLAYER) {
+            char buf[20];
+            lseek(fd, offset, SEEK_SET);
+            read(fd, buf, sizeof(buf));
+            lseek(fd, offset, SEEK_SET);
 
-        uint32_t ident = *((uint32_t*)buf);
+            uint32_t ident = *((uint32_t*)buf);
 
-        // Ogg vorbis?
-        if (ident == 0x5367674f) // 'OggS'
-            return 1.0;
+            // Ogg vorbis?
+            if (ident == 0x5367674f) // 'OggS'
+                return 1.0;
+        }
 
         return 0.0;
     }
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.h b/media/libmediaplayerservice/MediaPlayerFactory.h
index 5ddde19..55ff918 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.h
+++ b/media/libmediaplayerservice/MediaPlayerFactory.h
@@ -71,7 +71,6 @@
 
     static status_t registerFactory_l(IFactory* factory,
                                       player_type type);
-    static player_type getDefaultPlayerType();
 
     static Mutex       sLock;
     static tFactoryMap sFactoryMap;
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 8e1987a..ec1a9a0 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -54,7 +54,8 @@
       mDrmManagerClient(NULL),
       mMetaDataSize(-1ll),
       mBitrate(-1ll),
-      mPollBufferingGeneration(0) {
+      mPollBufferingGeneration(0),
+      mPendingReadBufferTypes(0) {
     resetDataSource();
     DataSource::RegisterDefaultSniffers();
 }
@@ -1148,15 +1149,27 @@
 }
 
 void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) {
-    sp<AMessage> msg = new AMessage(kWhatReadBuffer, id());
-    msg->setInt32("trackType", trackType);
-    msg->post();
+    Mutex::Autolock _l(mReadBufferLock);
+
+    if ((mPendingReadBufferTypes & (1 << trackType)) == 0) {
+        mPendingReadBufferTypes |= (1 << trackType);
+        sp<AMessage> msg = new AMessage(kWhatReadBuffer, id());
+        msg->setInt32("trackType", trackType);
+        msg->post();
+    }
 }
 
 void NuPlayer::GenericSource::onReadBuffer(sp<AMessage> msg) {
     int32_t tmpType;
     CHECK(msg->findInt32("trackType", &tmpType));
     media_track_type trackType = (media_track_type)tmpType;
+    {
+        // only protect the variable change, as readBuffer may
+        // take considerable time.  This may result in one extra
+        // read being processed, but that is benign.
+        Mutex::Autolock _l(mReadBufferLock);
+        mPendingReadBufferTypes &= ~(1 << trackType);
+    }
     readBuffer(trackType);
 }
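
postReadBuffer()/onReadBuffer() now coalesce duplicate requests: at most one kWhatReadBuffer message per track type is in flight, and the pending bit is cleared before the slow readBuffer() call, so at worst one extra read is processed. A generic sketch of the same pattern (std::mutex in place of Android's Mutex, hypothetical class name):

    #include <cstdint>
    #include <mutex>

    class ReadRequestCoalescer {
    public:
        // Returns true if the caller should actually post a read-buffer message.
        bool shouldPost(int trackType) {
            std::lock_guard<std::mutex> l(mLock);
            const uint32_t bit = 1u << trackType;
            if (mPending & bit) {
                return false;            // a request for this track type is already queued
            }
            mPending |= bit;
            return true;
        }

        // Called at the start of the handler, before the expensive read,
        // so that a request arriving during the read is not lost.
        void markHandling(int trackType) {
            std::lock_guard<std::mutex> l(mLock);
            mPending &= ~(1u << trackType);
        }

    private:
        std::mutex mLock;
        uint32_t mPending = 0;
    };
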
 
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 50ff98a..c70c48e 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -131,6 +131,8 @@
     off64_t mMetaDataSize;
     int64_t mBitrate;
     int32_t mPollBufferingGeneration;
+    uint32_t mPendingReadBufferTypes;
+    mutable Mutex mReadBufferLock;
 
     sp<ALooper> mLooper;
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 4a5d18a..df3e992 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -994,6 +994,8 @@
 
     ALOGV("both audio and video are flushed now.");
 
+    mPendingAudioAccessUnit.clear();
+
     if (mTimeDiscontinuityPending) {
         mRenderer->signalTimeDiscontinuity();
         mTimeDiscontinuityPending = false;
@@ -1242,7 +1244,8 @@
     CHECK(msg->findMessage("reply", &reply));
 
     if ((audio && mFlushingAudio != NONE)
-            || (!audio && mFlushingVideo != NONE)) {
+            || (!audio && mFlushingVideo != NONE)
+            || mSource == NULL) {
         reply->setInt32("err", INFO_DISCONTINUITY);
         reply->post();
         return OK;
@@ -1250,14 +1253,47 @@
 
     sp<ABuffer> accessUnit;
 
+    // Aggregate smaller buffers into a larger buffer.
+    // The goal is to reduce power consumption.
+    // Unfortunately this does not work with the software AAC decoder.
+    // TODO optimize buffer size for power consumption
+    // The offload read buffer size is 32 KB but 24 KB uses less power.
+    const int kAudioBigBufferSizeBytes = 24 * 1024;
+    bool doBufferAggregation = (audio && mOffloadAudio);
+    sp<ABuffer> biggerBuffer;
+    bool needMoreData = false;
+    int numSmallBuffers = 0;
+    bool gotTime = false;
+
     bool dropAccessUnit;
     do {
-        status_t err = mSource->dequeueAccessUnit(audio, &accessUnit);
+        status_t err;
+        // Did we save an accessUnit earlier because of a discontinuity?
+        if (audio && (mPendingAudioAccessUnit != NULL)) {
+            accessUnit = mPendingAudioAccessUnit;
+            mPendingAudioAccessUnit.clear();
+            err = mPendingAudioErr;
+            ALOGV("feedDecoderInputData() use mPendingAudioAccessUnit");
+        } else {
+            err = mSource->dequeueAccessUnit(audio, &accessUnit);
+        }
 
         if (err == -EWOULDBLOCK) {
-            return err;
+            if (biggerBuffer == NULL) {
+                return err;
+            } else {
+                break; // Reply with data that we already have.
+            }
         } else if (err != OK) {
             if (err == INFO_DISCONTINUITY) {
+                if (biggerBuffer != NULL) {
+                    // We already have some data so save this for later.
+                    mPendingAudioErr = err;
+                    mPendingAudioAccessUnit = accessUnit;
+                    accessUnit.clear();
+                    ALOGD("feedDecoderInputData() save discontinuity for later");
+                    break;
+                }
                 int32_t type;
                 CHECK(accessUnit->meta()->findInt32("discontinuity", &type));
 
@@ -1362,7 +1398,52 @@
             dropAccessUnit = true;
             ++mNumFramesDropped;
         }
-    } while (dropAccessUnit);
+
+        size_t smallSize = accessUnit->size();
+        needMoreData = false;
+        if (doBufferAggregation && (biggerBuffer == NULL)
+                // Don't bother if only room for a few small buffers.
+                && (smallSize < (kAudioBigBufferSizeBytes / 3))) {
+            // Create a larger buffer for combining smaller buffers from the extractor.
+            biggerBuffer = new ABuffer(kAudioBigBufferSizeBytes);
+            biggerBuffer->setRange(0, 0); // start empty
+        }
+
+        if (biggerBuffer != NULL) {
+            int64_t timeUs;
+            bool smallTimestampValid = accessUnit->meta()->findInt64("timeUs", &timeUs);
+            // Will the smaller buffer fit?
+            size_t bigSize = biggerBuffer->size();
+            size_t roomLeft = biggerBuffer->capacity() - bigSize;
+            // Should we save this small buffer for the next big buffer?
+            // If the first small buffer did not have a timestamp then save
+            // any buffer that does have a timestamp until the next big buffer.
+            if ((smallSize > roomLeft)
+                || (!gotTime && (numSmallBuffers > 0) && smallTimestampValid)) {
+                mPendingAudioErr = err;
+                mPendingAudioAccessUnit = accessUnit;
+                accessUnit.clear();
+            } else {
+                // Append small buffer to the bigger buffer.
+                memcpy(biggerBuffer->base() + bigSize, accessUnit->data(), smallSize);
+                bigSize += smallSize;
+                biggerBuffer->setRange(0, bigSize);
+
+                // Keep looping until we run out of room in the biggerBuffer.
+                needMoreData = true;
+
+                // Grab time from first small buffer if available.
+                if ((numSmallBuffers == 0) && smallTimestampValid) {
+                    biggerBuffer->meta()->setInt64("timeUs", timeUs);
+                    gotTime = true;
+                }
+
+                ALOGV("feedDecoderInputData() #%d, smallSize = %zu, bigSize = %zu, capacity = %zu",
+                        numSmallBuffers, smallSize, bigSize, biggerBuffer->capacity());
+                numSmallBuffers++;
+            }
+        }
+    } while (dropAccessUnit || needMoreData);
 
     // ALOGV("returned a valid buffer of %s data", audio ? "audio" : "video");
 
@@ -1378,7 +1459,13 @@
         mCCDecoder->decode(accessUnit);
     }
 
-    reply->setBuffer("buffer", accessUnit);
+    if (biggerBuffer != NULL) {
+        ALOGV("feedDecoderInputData() reply with aggregated buffer, %d", numSmallBuffers);
+        reply->setBuffer("buffer", biggerBuffer);
+    } else {
+        reply->setBuffer("buffer", accessUnit);
+    }
+
     reply->post();
 
     return OK;
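
The aggregation loop above packs small access units into one 24 KB buffer for offloaded audio; a unit that would overflow the buffer, or that carries the first usable timestamp after untimed data, is parked in mPendingAudioAccessUnit for the next aggregate. A reduced sketch (hypothetical helper, plain sizes instead of ABuffer) of just that append-or-save decision:

    #include <cstddef>

    enum Action { APPEND_TO_BIG_BUFFER, SAVE_FOR_NEXT_BIG_BUFFER };

    static Action decide(size_t smallSize,        // size of the dequeued access unit
                         size_t bigSize,          // bytes already in the aggregate buffer
                         size_t capacity,         // 24 * 1024 in the patch
                         bool smallHasTimeUs,     // this unit carries a "timeUs" entry
                         bool gotTime,            // the aggregate already has a timestamp
                         int numSmallBuffers) {   // units already appended
        size_t roomLeft = capacity - bigSize;
        // Park the unit if it does not fit, or if the aggregate started without a timestamp
        // and this unit has one (so the next aggregate begins with a valid timeUs).
        if (smallSize > roomLeft
                || (!gotTime && numSmallBuffers > 0 && smallHasTimeUs)) {
            return SAVE_FOR_NEXT_BIG_BUFFER;
        }
        return APPEND_TO_BIG_BUFFER;
    }
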
@@ -1745,6 +1832,9 @@
     ++mScanSourcesGeneration;
     mScanSourcesPending = false;
 
+    ++mAudioDecoderGeneration;
+    ++mVideoDecoderGeneration;
+
     if (mRendererLooper != NULL) {
         if (mRenderer != NULL) {
             mRendererLooper->unregisterHandler(mRenderer->id());
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 0c7f531..89ae11c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -158,6 +158,9 @@
     // notion of time has changed.
     bool mTimeDiscontinuityPending;
 
+    sp<ABuffer> mPendingAudioAccessUnit;
+    status_t    mPendingAudioErr;
+
     FlushStatus mFlushingAudio;
     FlushStatus mFlushingVideo;
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 8ce7baf..163a0b5 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -595,7 +595,18 @@
         {
             if (!isStaleReply(msg)) {
                 onInputBufferFilled(msg);
+            } else {
+                /* release any MediaBuffer passed in the stale buffer */
+                sp<ABuffer> buffer;
+                MediaBuffer *mediaBuffer = NULL;
+                if (msg->findBuffer("buffer", &buffer) &&
+                    buffer->meta()->findPointer(
+                            "mediaBuffer", (void **)&mediaBuffer) &&
+                    mediaBuffer != NULL) {
+                    mediaBuffer->release();
+                }
             }
+
             break;
         }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index c9be0dd..ab7906a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -115,9 +115,12 @@
     notify->post();
     mPendingBuffers++;
 
-    sp<AMessage> message = new AMessage(kWhatRequestABuffer, id());
-    message->setInt32("generation", mBufferGeneration);
-    message->post();
+    // pending buffers will already result in requestABuffer calls when they are consumed
+    if (mPendingBuffers < kMaxPendingBuffers) {
+        sp<AMessage> message = new AMessage(kWhatRequestABuffer, id());
+        message->setInt32("generation", mBufferGeneration);
+        message->post();
+    }
     return;
 }
 
@@ -155,9 +158,7 @@
 void NuPlayer::DecoderPassThrough::onBufferConsumed(int32_t size) {
     mPendingBuffers--;
     mCachedBytes -= size;
-    sp<AMessage> message = new AMessage(kWhatRequestABuffer, id());
-    message->setInt32("generation", mBufferGeneration);
-    message->post();
+    requestABuffer();
 }
 
 void NuPlayer::DecoderPassThrough::onFlush() {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 09324ae..7dd54c1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -626,11 +626,14 @@
     switch (msg) {
         case MEDIA_PLAYBACK_COMPLETE:
         {
-            if (mLooping && mState != STATE_RESET_IN_PROGRESS) {
-                mLock.unlock();
-                mPlayer->seekToAsync(0);
-                mLock.lock();
-                break;
+            if (mState != STATE_RESET_IN_PROGRESS) {
+                if (mLooping) {
+                    mPlayer->seekToAsync(0);
+                    break;
+                }
+
+                mPlayer->pause();
+                mState = STATE_PAUSED;
             }
             // fall through
         }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index aad6e93..067784b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -893,8 +893,10 @@
 }
 
 void NuPlayer::Renderer::onPause() {
-    CHECK(!mPaused);
-
+    if (mPaused) {
+        ALOGW("Renderer::onPause() called while already paused!");
+        return;
+    }
     {
         Mutex::Autolock autoLock(mLock);
         ++mAudioQueueGeneration;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index fc2dd30..0bfc6e4 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -270,7 +270,20 @@
     }
 
     sp<AMessage> response;
-    return PostAndAwaitResponse(msg, &response);
+    status_t err = PostAndAwaitResponse(msg, &response);
+
+    if (err != OK && err != INVALID_OPERATION) {
+        // MediaCodec now sets its state to UNINITIALIZED upon any fatal error.
+        // To maintain backward compatibility, do a reset() to put the codec
+        // back into the INITIALIZED state.
+        // But don't reset if err is INVALID_OPERATION, which means the
+        // configure failure is due to a wrong state.
+
+        ALOGE("configure failed with err 0x%08x, resetting...", err);
+        reset();
+    }
+
+    return err;
 }
 
 status_t MediaCodec::createInputSurface(
diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp
index da50c56..1fdb244 100644
--- a/media/libstagefright/TimedEventQueue.cpp
+++ b/media/libstagefright/TimedEventQueue.cpp
@@ -338,7 +338,7 @@
             status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
                                                              binder,
                                                              String16("TimedEventQueue"),
-                                                             String16("media"));
+                                                             String16("media"));    // not oneway
             IPCThreadState::self()->restoreCallingIdentity(token);
             if (status == NO_ERROR) {
                 mWakeLockToken = binder;
@@ -363,7 +363,7 @@
         CHECK(mWakeLockToken != 0);
         if (mPowerManager != 0) {
             int64_t token = IPCThreadState::self()->clearCallingIdentity();
-            mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+            mPowerManager->releaseWakeLock(mWakeLockToken, 0);  // not oneway
             IPCThreadState::self()->restoreCallingIdentity(token);
         }
         mWakeLockToken.clear();
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 2f63bdd..828577a 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -137,29 +137,10 @@
 
             uint32_t width = mImg->d_w;
             uint32_t height = mImg->d_h;
-
-            if (width != mWidth || height != mHeight) {
-                mWidth = width;
-                mHeight = height;
-
-                if (!mIsAdaptive || width > mAdaptiveMaxWidth || height > mAdaptiveMaxHeight) {
-                    if (mIsAdaptive) {
-                        if (width > mAdaptiveMaxWidth) {
-                            mAdaptiveMaxWidth = width;
-                        }
-                        if (height > mAdaptiveMaxHeight) {
-                            mAdaptiveMaxHeight = height;
-                        }
-                    }
-                    updatePortDefinitions();
-                    notify(OMX_EventPortSettingsChanged, kOutputPortIndex, 0, NULL);
-                    mOutputPortSettingsChange = AWAITING_DISABLED;
-                    return;
-                } else {
-                    updatePortDefinitions();
-                    notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
-                           OMX_IndexConfigCommonOutputCrop, NULL);
-                }
+            bool portWillReset = false;
+            handlePortSettingsChange(&portWillReset, width, height);
+            if (portWillReset) {
+                return;
             }
 
             outHeader->nOffset = 0;
@@ -167,36 +148,14 @@
             outHeader->nFlags = EOSseen ? OMX_BUFFERFLAG_EOS : 0;
             outHeader->nTimeStamp = inHeader->nTimeStamp;
 
-            uint32_t buffer_stride = mIsAdaptive ? mAdaptiveMaxWidth : mWidth;
-            uint32_t buffer_height = mIsAdaptive ? mAdaptiveMaxHeight : mHeight;
-
-            const uint8_t *srcLine = (const uint8_t *)mImg->planes[PLANE_Y];
             uint8_t *dst = outHeader->pBuffer;
-            for (size_t i = 0; i < buffer_height; ++i) {
-                if (i < mImg->d_h) {
-                    memcpy(dst, srcLine, mImg->d_w);
-                    srcLine += mImg->stride[PLANE_Y];
-                }
-                dst += buffer_stride;
-            }
-
-            srcLine = (const uint8_t *)mImg->planes[PLANE_U];
-            for (size_t i = 0; i < buffer_height / 2; ++i) {
-                if (i < mImg->d_h / 2) {
-                    memcpy(dst, srcLine, mImg->d_w / 2);
-                    srcLine += mImg->stride[PLANE_U];
-                }
-                dst += buffer_stride / 2;
-            }
-
-            srcLine = (const uint8_t *)mImg->planes[PLANE_V];
-            for (size_t i = 0; i < buffer_height / 2; ++i) {
-                if (i < mImg->d_h / 2) {
-                    memcpy(dst, srcLine, mImg->d_w / 2);
-                    srcLine += mImg->stride[PLANE_V];
-                }
-                dst += buffer_stride / 2;
-            }
+            const uint8_t *srcY = (const uint8_t *)mImg->planes[PLANE_Y];
+            const uint8_t *srcU = (const uint8_t *)mImg->planes[PLANE_U];
+            const uint8_t *srcV = (const uint8_t *)mImg->planes[PLANE_V];
+            size_t srcYStride = mImg->stride[PLANE_Y];
+            size_t srcUStride = mImg->stride[PLANE_U];
+            size_t srcVStride = mImg->stride[PLANE_V];
+            copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
 
             mImg = NULL;
             outInfo->mOwnedByUs = false;
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
index a7bde97..cf3c3e3 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
@@ -58,7 +58,6 @@
             320 /* width */, 240 /* height */, callbacks, appData, component),
       mHandle(NULL),
       mInputBufferCount(0),
-      mPictureSize(mWidth * mHeight * 3 / 2),
       mFirstPicture(NULL),
       mFirstPictureId(-1),
       mPicId(0),
@@ -118,7 +117,7 @@
     }
 
     H264SwDecRet ret = H264SWDEC_PIC_RDY;
-    bool portSettingsChanged = false;
+    bool portWillReset = false;
     while ((mEOSStatus != INPUT_DATA_AVAILABLE || !inQueue.empty())
             && outQueue.size() == kNumOutputBuffers) {
 
@@ -161,17 +160,13 @@
                     H264SwDecInfo decoderInfo;
                     CHECK(H264SwDecGetInfo(mHandle, &decoderInfo) == H264SWDEC_OK);
 
-                    if (handlePortSettingChangeEvent(&decoderInfo)) {
-                        portSettingsChanged = true;
-                    }
-
-                    if (decoderInfo.croppingFlag &&
-                        handleCropRectEvent(&decoderInfo.cropParams)) {
-                        portSettingsChanged = true;
-                    }
+                    bool cropChanged = handleCropChange(decoderInfo);
+                    handlePortSettingsChange(
+                            &portWillReset, decoderInfo.picWidth, decoderInfo.picHeight,
+                            cropChanged);
                 }
             } else {
-                if (portSettingsChanged) {
+                if (portWillReset) {
                     if (H264SwDecNextPicture(mHandle, &decodedPicture, 0)
                         == H264SWDEC_PIC_RDY) {
 
@@ -199,8 +194,7 @@
         inInfo->mOwnedByUs = false;
         notifyEmptyBufferDone(inHeader);
 
-        if (portSettingsChanged) {
-            portSettingsChanged = false;
+        if (portWillReset) {
             return;
         }
 
@@ -215,44 +209,33 @@
     }
 }
 
-bool SoftAVC::handlePortSettingChangeEvent(const H264SwDecInfo *info) {
-    if (mWidth != info->picWidth || mHeight != info->picHeight) {
-        mWidth  = info->picWidth;
-        mHeight = info->picHeight;
-        mPictureSize = mWidth * mHeight * 3 / 2;
-        updatePortDefinitions();
-        notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
-        mOutputPortSettingsChange = AWAITING_DISABLED;
-        return true;
+bool SoftAVC::handleCropChange(const H264SwDecInfo& decInfo) {
+    if (!decInfo.croppingFlag) {
+        return false;
     }
 
-    return false;
-}
-
-bool SoftAVC::handleCropRectEvent(const CropParams *crop) {
-    if (mCropLeft != crop->cropLeftOffset ||
-        mCropTop != crop->cropTopOffset ||
-        mCropWidth != crop->cropOutWidth ||
-        mCropHeight != crop->cropOutHeight) {
-        mCropLeft = crop->cropLeftOffset;
-        mCropTop = crop->cropTopOffset;
-        mCropWidth = crop->cropOutWidth;
-        mCropHeight = crop->cropOutHeight;
-
-        notify(OMX_EventPortSettingsChanged, 1,
-                OMX_IndexConfigCommonOutputCrop, NULL);
-
-        return true;
+    const CropParams& crop = decInfo.cropParams;
+    if (mCropLeft == crop.cropLeftOffset &&
+        mCropTop == crop.cropTopOffset &&
+        mCropWidth == crop.cropOutWidth &&
+        mCropHeight == crop.cropOutHeight) {
+        return false;
     }
-    return false;
+
+    mCropLeft = crop.cropLeftOffset;
+    mCropTop = crop.cropTopOffset;
+    mCropWidth = crop.cropOutWidth;
+    mCropHeight = crop.cropOutHeight;
+    return true;
 }
 
 void SoftAVC::saveFirstOutputBuffer(int32_t picId, uint8_t *data) {
     CHECK(mFirstPicture == NULL);
     mFirstPictureId = picId;
 
-    mFirstPicture = new uint8_t[mPictureSize];
-    memcpy(mFirstPicture, data, mPictureSize);
+    uint32_t pictureSize = mWidth * mHeight * 3 / 2;
+    mFirstPicture = new uint8_t[pictureSize];
+    memcpy(mFirstPicture, data, pictureSize);
 }
 
 void SoftAVC::drainOneOutputBuffer(int32_t picId, uint8_t* data) {
@@ -263,9 +246,17 @@
     OMX_BUFFERHEADERTYPE *header = mPicToHeaderMap.valueFor(picId);
     outHeader->nTimeStamp = header->nTimeStamp;
     outHeader->nFlags = header->nFlags;
-    outHeader->nFilledLen = mPictureSize;
-    memcpy(outHeader->pBuffer + outHeader->nOffset,
-            data, mPictureSize);
+    outHeader->nFilledLen = mWidth * mHeight * 3 / 2;
+
+    uint8_t *dst = outHeader->pBuffer + outHeader->nOffset;
+    const uint8_t *srcY = data;
+    const uint8_t *srcU = srcY + mWidth * mHeight;
+    const uint8_t *srcV = srcU + mWidth * mHeight / 4;
+    size_t srcYStride = mWidth;
+    size_t srcUStride = mWidth / 2;
+    size_t srcVStride = srcUStride;
+    copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
+
     mPicToHeaderMap.removeItem(picId);
     delete header;
     outInfo->mOwnedByUs = false;
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
index ee69926..253a406 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
@@ -55,8 +55,6 @@
 
     size_t mInputBufferCount;
 
-    uint32_t mPictureSize;
-
     uint8_t *mFirstPicture;
     int32_t mFirstPictureId;
 
@@ -75,8 +73,7 @@
     void drainAllOutputBuffers(bool eos);
     void drainOneOutputBuffer(int32_t picId, uint8_t *data);
     void saveFirstOutputBuffer(int32_t pidId, uint8_t *data);
-    bool handleCropRectEvent(const CropParams* crop);
-    bool handlePortSettingChangeEvent(const H264SwDecInfo *info);
+    bool handleCropChange(const H264SwDecInfo& decInfo);
 
     DISALLOW_EVIL_CONSTRUCTORS(SoftAVC);
 };
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index d268aa4..bc3e3fb 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -14,6 +14,11 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AMessage"
+//#define LOG_NDEBUG 0
+//#define DUMP_STATS
+#include <cutils/log.h>
+
 #include "AMessage.h"
 
 #include <ctype.h>
@@ -60,12 +65,14 @@
 void AMessage::clear() {
     for (size_t i = 0; i < mNumItems; ++i) {
         Item *item = &mItems[i];
-        freeItem(item);
+        delete[] item->mName;
+        item->mName = NULL;
+        freeItemValue(item);
     }
     mNumItems = 0;
 }
 
-void AMessage::freeItem(Item *item) {
+void AMessage::freeItemValue(Item *item) {
     switch (item->mType) {
         case kTypeString:
         {
@@ -88,25 +95,85 @@
     }
 }
 
-AMessage::Item *AMessage::allocateItem(const char *name) {
-    name = AAtomizer::Atomize(name);
+#ifdef DUMP_STATS
+#include <utils/Mutex.h>
 
-    size_t i = 0;
-    while (i < mNumItems && mItems[i].mName != name) {
-        ++i;
+Mutex gLock;
+static int32_t gFindItemCalls = 1;
+static int32_t gDupCalls = 1;
+static int32_t gAverageNumItems = 0;
+static int32_t gAverageNumChecks = 0;
+static int32_t gAverageNumMemChecks = 0;
+static int32_t gAverageDupItems = 0;
+static int32_t gLastChecked = -1;
+
+static void reportStats() {
+    int32_t time = (ALooper::GetNowUs() / 1000);
+    if (time / 1000 != gLastChecked / 1000) {
+        gLastChecked = time;
+        ALOGI("called findItemIx %zu times (for len=%.1f i=%.1f/%.1f mem) dup %zu times (for len=%.1f)",
+                gFindItemCalls,
+                gAverageNumItems / (float)gFindItemCalls,
+                gAverageNumChecks / (float)gFindItemCalls,
+                gAverageNumMemChecks / (float)gFindItemCalls,
+                gDupCalls,
+                gAverageDupItems / (float)gDupCalls);
+        gFindItemCalls = gDupCalls = 1;
+        gAverageNumItems = gAverageNumChecks = gAverageNumMemChecks = gAverageDupItems = 0;
+        gLastChecked = time;
     }
+}
+#endif
 
+inline size_t AMessage::findItemIndex(const char *name, size_t len) const {
+#ifdef DUMP_STATS
+    size_t memchecks = 0;
+#endif
+    size_t i = 0;
+    for (; i < mNumItems; i++) {
+        if (len != mItems[i].mNameLength) {
+            continue;
+        }
+#ifdef DUMP_STATS
+        ++memchecks;
+#endif
+        if (!memcmp(mItems[i].mName, name, len)) {
+            break;
+        }
+    }
+#ifdef DUMP_STATS
+    {
+        Mutex::Autolock _l(gLock);
+        ++gFindItemCalls;
+        gAverageNumItems += mNumItems;
+        gAverageNumMemChecks += memchecks;
+        gAverageNumChecks += i;
+        reportStats();
+    }
+#endif
+    return i;
+}
+
+// assumes item's name was uninitialized or NULL
+void AMessage::Item::setName(const char *name, size_t len) {
+    mNameLength = len;
+    mName = new char[len + 1];
+    memcpy((void*)mName, name, len + 1);
+}
+
+AMessage::Item *AMessage::allocateItem(const char *name) {
+    size_t len = strlen(name);
+    size_t i = findItemIndex(name, len);
     Item *item;
 
     if (i < mNumItems) {
         item = &mItems[i];
-        freeItem(item);
+        freeItemValue(item);
     } else {
         CHECK(mNumItems < kMaxNumItems);
         i = mNumItems++;
         item = &mItems[i];
-
-        item->mName = name;
+        item->setName(name, len);
     }
 
     return item;
@@ -114,31 +181,18 @@
 
 const AMessage::Item *AMessage::findItem(
         const char *name, Type type) const {
-    name = AAtomizer::Atomize(name);
-
-    for (size_t i = 0; i < mNumItems; ++i) {
+    size_t i = findItemIndex(name, strlen(name));
+    if (i < mNumItems) {
         const Item *item = &mItems[i];
+        return item->mType == type ? item : NULL;
 
-        if (item->mName == name) {
-            return item->mType == type ? item : NULL;
-        }
     }
-
     return NULL;
 }
 
 bool AMessage::contains(const char *name) const {
-    name = AAtomizer::Atomize(name);
-
-    for (size_t i = 0; i < mNumItems; ++i) {
-        const Item *item = &mItems[i];
-
-        if (item->mName == name) {
-            return true;
-        }
-    }
-
-    return false;
+    size_t i = findItemIndex(name, strlen(name));
+    return i < mNumItems;
 }
 
 #define BASIC_TYPE(NAME,FIELDNAME,TYPENAME)                             \
@@ -297,11 +351,20 @@
     sp<AMessage> msg = new AMessage(mWhat, mTarget);
     msg->mNumItems = mNumItems;
 
+#ifdef DUMP_STATS
+    {
+        Mutex::Autolock _l(gLock);
+        ++gDupCalls;
+        gAverageDupItems += mNumItems;
+        reportStats();
+    }
+#endif
+
     for (size_t i = 0; i < mNumItems; ++i) {
         const Item *from = &mItems[i];
         Item *to = &msg->mItems[i];
 
-        to->mName = from->mName;
+        to->setName(from->mName, from->mNameLength);
         to->mType = from->mType;
 
         switch (from->mType) {
@@ -472,11 +535,11 @@
     sp<AMessage> msg = new AMessage(what);
 
     msg->mNumItems = static_cast<size_t>(parcel.readInt32());
-
     for (size_t i = 0; i < msg->mNumItems; ++i) {
         Item *item = &msg->mItems[i];
 
-        item->mName = AAtomizer::Atomize(parcel.readCString());
+        const char *name = parcel.readCString();
+        item->setName(name, strlen(name));
         item->mType = static_cast<Type>(parcel.readInt32());
 
         switch (item->mType) {
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
index ee553d9..4a6ab63 100644
--- a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
@@ -63,7 +63,14 @@
             OMX_U32 numOutputBuffers,
             const char *mimeType);
 
-    virtual void updatePortDefinitions();
+    virtual void updatePortDefinitions(bool updateCrop = true);
+
+    void handlePortSettingsChange(
+            bool *portWillReset, uint32_t width, uint32_t height, bool cropChanged = false);
+
+    void copyYV12FrameToOutputBuffer(
+            uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
+            size_t srcYStride, size_t srcUStride, size_t srcVStride);
 
     enum {
         kInputPortIndex  = 0,
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index 69b572e..e533fdd 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -123,13 +123,15 @@
     updatePortDefinitions();
 }
 
-void SoftVideoDecoderOMXComponent::updatePortDefinitions() {
+void SoftVideoDecoderOMXComponent::updatePortDefinitions(bool updateCrop) {
     OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
     def->format.video.nFrameWidth = mWidth;
     def->format.video.nFrameHeight = mHeight;
     def->format.video.nStride = def->format.video.nFrameWidth;
     def->format.video.nSliceHeight = def->format.video.nFrameHeight;
 
+    def->nBufferSize = def->format.video.nFrameWidth * def->format.video.nFrameHeight * 3 / 2;
+
     def = &editPortInfo(kOutputPortIndex)->mDef;
     def->format.video.nFrameWidth = mIsAdaptive ? mAdaptiveMaxWidth : mWidth;
     def->format.video.nFrameHeight = mIsAdaptive ? mAdaptiveMaxHeight : mHeight;
@@ -140,10 +142,77 @@
             (def->format.video.nFrameWidth *
              def->format.video.nFrameHeight * 3) / 2;
 
-    mCropLeft = 0;
-    mCropTop = 0;
-    mCropWidth = mWidth;
-    mCropHeight = mHeight;
+    if (updateCrop) {
+        mCropLeft = 0;
+        mCropTop = 0;
+        mCropWidth = mWidth;
+        mCropHeight = mHeight;
+    }
+}
+
+void SoftVideoDecoderOMXComponent::handlePortSettingsChange(
+        bool *portWillReset, uint32_t width, uint32_t height, bool cropChanged) {
+    *portWillReset = false;
+    bool sizeChanged = (width != mWidth || height != mHeight);
+
+    if (sizeChanged || cropChanged) {
+        mWidth = width;
+        mHeight = height;
+
+        bool updateCrop = !cropChanged;
+        if ((sizeChanged && !mIsAdaptive)
+            || width > mAdaptiveMaxWidth
+            || height > mAdaptiveMaxHeight) {
+            if (mIsAdaptive) {
+                if (width > mAdaptiveMaxWidth) {
+                    mAdaptiveMaxWidth = width;
+                }
+                if (height > mAdaptiveMaxHeight) {
+                    mAdaptiveMaxHeight = height;
+                }
+            }
+            updatePortDefinitions(updateCrop);
+            notify(OMX_EventPortSettingsChanged, kOutputPortIndex, 0, NULL);
+            mOutputPortSettingsChange = AWAITING_DISABLED;
+            *portWillReset = true;
+        } else {
+            updatePortDefinitions(updateCrop);
+            notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
+                   OMX_IndexConfigCommonOutputCrop, NULL);
+        }
+    }
+}
+
+void SoftVideoDecoderOMXComponent::copyYV12FrameToOutputBuffer(
+        uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
+        size_t srcYStride, size_t srcUStride, size_t srcVStride) {
+    size_t dstYStride = mIsAdaptive ? mAdaptiveMaxWidth : mWidth;
+    size_t dstUVStride = dstYStride / 2;
+    size_t dstHeight = mIsAdaptive ? mAdaptiveMaxHeight : mHeight;
+
+    for (size_t i = 0; i < dstHeight; ++i) {
+        if (i < mHeight) {
+            memcpy(dst, srcY, mWidth);
+            srcY += srcYStride;
+        }
+        dst += dstYStride;
+    }
+
+    for (size_t i = 0; i < dstHeight / 2; ++i) {
+        if (i < mHeight / 2) {
+            memcpy(dst, srcU, mWidth / 2);
+            srcU += srcUStride;
+        }
+        dst += dstUVStride;
+    }
+
+    for (size_t i = 0; i < dstHeight / 2; ++i) {
+        if (i < mHeight / 2) {
+            memcpy(dst, srcV, mWidth / 2);
+            srcV += srcVStride;
+        }
+        dst += dstUVStride;
+    }
 }
 
 OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalGetParameter(
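
copyYV12FrameToOutputBuffer() advances the destination by the adaptive-maximum strides while copying only the decoded width and height per plane. A worked numeric sketch, assuming a 1280x720 stream decoded into a 1920x1088 adaptive output buffer:

    #include <cstddef>
    #include <cstdio>

    int main() {
        const size_t width = 1280,    height = 720;     // decoded frame (mWidth, mHeight)
        const size_t maxWidth = 1920, maxHeight = 1088; // adaptive maximums

        const size_t dstYStride  = maxWidth;            // 1920 bytes between destination Y rows
        const size_t dstUVStride = dstYStride / 2;      // 960 bytes between destination U/V rows
        const size_t dstYRows    = maxHeight;           // 1088 Y rows walked, 544 U/V rows each

        const size_t copiedY  = width * height;                   // 921600 bytes of Y copied
        const size_t walkedY  = dstYStride * dstYRows;            // 2088960 bytes of Y plane walked
        const size_t copiedUV = 2 * (width / 2) * (height / 2);   // 460800 bytes of U+V copied

        printf("Y:  copied %zu of %zu bytes, stride %zu\n", copiedY, walkedY, dstYStride);
        printf("UV: copied %zu bytes, stride %zu, %zu rows per plane\n",
               copiedUV, dstUVStride, dstYRows / 2);
        return 0;
    }
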
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 2d0a25f..7544052 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -593,10 +593,10 @@
                         status = BAD_VALUE;
                         break;
                     }
-                    status = thread->sendReleaseAudioPatchConfigEvent(mPatches[index]->mHalHandle);
+                    status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
                 } else {
                     audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-                    status = hwDevice->release_audio_patch(hwDevice, mPatches[index]->mHalHandle);
+                    status = hwDevice->release_audio_patch(hwDevice, removedPatch->mHalHandle);
                 }
             } else {
                 sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
@@ -632,7 +632,7 @@
             }
             AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
             if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-                status = thread->sendReleaseAudioPatchConfigEvent(mPatches[index]->mHalHandle);
+                status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
             } else {
                 AudioParameter param;
                 param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 942bff6..97b1753 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -662,12 +662,14 @@
                     binder,
                     getWakeLockTag(),
                     String16("media"),
-                    uid);
+                    uid,
+                    true /* FIXME force oneway contrary to .aidl */);
         } else {
             status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
                     binder,
                     getWakeLockTag(),
-                    String16("media"));
+                    String16("media"),
+                    true /* FIXME force oneway contrary to .aidl */);
         }
         if (status == NO_ERROR) {
             mWakeLockToken = binder;
@@ -687,7 +689,8 @@
     if (mWakeLockToken != 0) {
         ALOGV("releaseWakeLock_l() %s", mName);
         if (mPowerManager != 0) {
-            mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+            mPowerManager->releaseWakeLock(mWakeLockToken, 0,
+                    true /* FIXME force oneway contrary to .aidl */);
         }
         mWakeLockToken.clear();
     }
@@ -723,7 +726,8 @@
     if (mPowerManager != 0) {
         sp<IBinder> binder = new BBinder();
         status_t status;
-        status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array());
+        status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
+                    true /* FIXME force oneway contrary to .aidl */);
         ALOGV("acquireWakeLock_l() %s status %d", mName, status);
     }
 }
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 084c853..22c4e04 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -64,6 +64,7 @@
 const StringToEnum sDeviceNameToEnumTable[] = {
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
@@ -485,7 +486,9 @@
         // request to reuse existing output stream if one is already opened to reach the RX device
         SortedVector<audio_io_handle_t> outputs =
                                 getOutputsForDevice(rxDevice, mOutputs);
-        audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+        audio_io_handle_t output = selectOutput(outputs,
+                                                AUDIO_OUTPUT_FLAG_NONE,
+                                                AUDIO_FORMAT_INVALID);
         if (output != AUDIO_IO_HANDLE_NONE) {
             sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
             ALOG_ASSERT(!outputDesc->isDuplicated(),
@@ -524,7 +527,9 @@
 
         SortedVector<audio_io_handle_t> outputs =
                                 getOutputsForDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX, mOutputs);
-        audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+        audio_io_handle_t output = selectOutput(outputs,
+                                                AUDIO_OUTPUT_FLAG_NONE,
+                                                AUDIO_FORMAT_INVALID);
         // request to reuse existing output stream if one is already opened to reach the TX
         // path output device
         if (output != AUDIO_IO_HANDLE_NONE) {
@@ -1016,7 +1021,9 @@
         // routing change will happen when startOutput() will be called
         SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
 
-        output = selectOutput(outputs, flags);
+        // At this stage, ignore the DIRECT flag since no direct output could be found earlier
+        flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
+        output = selectOutput(outputs, flags, format);
     }
     ALOGW_IF((output == 0), "getOutput() could not find output for stream %d, samplingRate %d,"
             "format %d, channels %x, flags %x", stream, samplingRate, format, channelMask, flags);
@@ -1027,7 +1034,8 @@
 }
 
 audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
-                                                       audio_output_flags_t flags)
+                                                       audio_output_flags_t flags,
+                                                       audio_format_t format)
 {
     // select one output among several that provide a path to a particular device or set of
     // devices (the list was previously build by getOutputsForDevice()).
@@ -1050,6 +1058,17 @@
     for (size_t i = 0; i < outputs.size(); i++) {
         sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
         if (!outputDesc->isDuplicated()) {
+            // if a valid format is specified, skip output if not compatible
+            if (format != AUDIO_FORMAT_INVALID) {
+                if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+                    if (format != outputDesc->mFormat) {
+                        continue;
+                    }
+                } else if (!audio_is_linear_pcm(format)) {
+                    continue;
+                }
+            }
+
             int commonFlags = popcount(outputDesc->mProfile->mFlags & flags);
             if (commonFlags > maxCommonFlags) {
                 outputFlags = outputs[i];
@@ -2307,7 +2326,9 @@
                                                                 mOutputs);
                     // if the sink device is reachable via an opened output stream, request to go via
                     // this output stream by adding a second source to the patch description
-                    audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+                    audio_io_handle_t output = selectOutput(outputs,
+                                                            AUDIO_OUTPUT_FLAG_NONE,
+                                                            AUDIO_FORMAT_INVALID);
                     if (output != AUDIO_IO_HANDLE_NONE) {
                         sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
                         if (outputDesc->isDuplicated()) {
@@ -3804,6 +3825,14 @@
             break;
         }
     }
+
+    /* Filter SPEAKER_SAFE out of results, as AudioService doesn't know about it
+       and doesn't really need to. */
+    if (devices & AUDIO_DEVICE_OUT_SPEAKER_SAFE) {
+        devices |= AUDIO_DEVICE_OUT_SPEAKER;
+        devices &= ~AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+    }
+
     return devices;
 }
 
@@ -3906,12 +3935,20 @@
             //   the isStreamActive() method only informs about the activity of a stream, not
             //   if it's for local playback. Note also that we use the same delay between both tests
             device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
+            //user "safe" speaker if available instead of normal speaker to avoid triggering
+            //other acoustic safety mechanisms for notification
+            if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
+                device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
         } else if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
             // while media is playing (or has recently played), use the same device
             device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
         } else {
             // when media is not playing anymore, fall back on the sonification behavior
             device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
+            //user "safe" speaker if available instead of normal speaker to avoid triggering
+            //other acoustic safety mechanisms for notification
+            if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
+                device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
         }
 
         break;
@@ -4648,6 +4685,10 @@
         }
     }
 
+    /* SPEAKER_SAFE is an alias of SPEAKER for purposes of volume control */
+    if (device == AUDIO_DEVICE_OUT_SPEAKER_SAFE)
+        device = AUDIO_DEVICE_OUT_SPEAKER;
+
     ALOGW_IF(popcount(device) != 1,
             "getDeviceForVolume() invalid device combination: %08x",
             device);
@@ -6039,14 +6080,26 @@
         return 0;
     }
 
+    // For direct outputs, pick the minimum sampling rate: this helps ensure that the
+    // channel count / sampling rate combination chosen will be supported by the
+    // connected sink
+    if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+        uint32_t samplingRate = UINT_MAX;
+        for (size_t i = 0; i < mSamplingRates.size(); i ++) {
+            if ((mSamplingRates[i] < samplingRate) && (mSamplingRates[i] > 0)) {
+                samplingRate = mSamplingRates[i];
+            }
+        }
+        return (samplingRate == UINT_MAX) ? 0 : samplingRate;
+    }
+
     uint32_t samplingRate = 0;
     uint32_t maxRate = MAX_MIXER_SAMPLING_RATE;
 
     // For mixed output and inputs, use max mixer sampling rates. Do not
     // limit sampling rate otherwise
-    if ((mType != AUDIO_PORT_TYPE_MIX) ||
-            ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
-            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)))) {
+    if (mType != AUDIO_PORT_TYPE_MIX) {
         maxRate = UINT_MAX;
     }
     for (size_t i = 0; i < mSamplingRates.size(); i ++) {
@@ -6063,16 +6116,35 @@
     if (mChannelMasks.size() == 1 && mChannelMasks[0] == 0) {
         return AUDIO_CHANNEL_NONE;
     }
-
     audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE;
+
+    // For direct outputs, pick the minimum channel count: this helps ensure that the
+    // channel count / sampling rate combination chosen will be supported by the
+    // connected sink
+    if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+        uint32_t channelCount = UINT_MAX;
+        for (size_t i = 0; i < mChannelMasks.size(); i ++) {
+            uint32_t cnlCount;
+            if (mUseInChannelMask) {
+                cnlCount = audio_channel_count_from_in_mask(mChannelMasks[i]);
+            } else {
+                cnlCount = audio_channel_count_from_out_mask(mChannelMasks[i]);
+            }
+            if ((cnlCount < channelCount) && (cnlCount > 0)) {
+                channelMask = mChannelMasks[i];
+                channelCount = cnlCount;
+            }
+        }
+        return channelMask;
+    }
+
     uint32_t channelCount = 0;
     uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
 
     // For mixed output and inputs, use max mixer channel count. Do not
     // limit channel count otherwise
-    if ((mType != AUDIO_PORT_TYPE_MIX) ||
-            ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
-            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)))) {
+    if (mType != AUDIO_PORT_TYPE_MIX) {
         maxCount = UINT_MAX;
     }
     for (size_t i = 0; i < mChannelMasks.size(); i ++) {
@@ -6084,6 +6156,7 @@
         }
         if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
             channelMask = mChannelMasks[i];
+            channelCount = cnlCount;
         }
     }
     return channelMask;
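
For direct and compressed-offload mix outputs, pickSamplingRate() and pickChannelMask() now choose the smallest supported sampling rate and channel count rather than the largest, improving the odds that the combination is accepted by the connected sink. A standalone sketch of the selection rule for sampling rates, assuming the supported rates are available in a plain vector:

    #include <cstdint>
    #include <vector>

    // Pick the smallest non-zero rate, or 0 if none is usable -- the rule the
    // direct-output branch above applies to mSamplingRates.
    uint32_t pickMinimumRate(const std::vector<uint32_t> &rates) {
        uint32_t best = UINT32_MAX;
        for (uint32_t rate : rates) {
            if (rate > 0 && rate < best) {
                best = rate;
            }
        }
        return (best == UINT32_MAX) ? 0 : best;
    }

The channel mask selection is analogous, except that the comparison is done on the channel count derived from each candidate mask.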
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index 57e015e..da0d95d 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -713,7 +713,8 @@
                                             uint32_t delayMs);
 
         audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
-                                       audio_output_flags_t flags);
+                                       audio_output_flags_t flags,
+                                       audio_format_t format);
         // samplingRate parameter is an in/out and so may be modified
         sp<IOProfile> getInputProfile(audio_devices_t device,
                                    uint32_t& samplingRate,
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 9d6ab23..e184d97 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -34,6 +34,7 @@
     api1/client2/JpegProcessor.cpp \
     api1/client2/CallbackProcessor.cpp \
     api1/client2/ZslProcessor.cpp \
+    api1/client2/ZslProcessorInterface.cpp \
     api1/client2/BurstCapture.cpp \
     api1/client2/JpegCompressor.cpp \
     api1/client2/CaptureSequencer.cpp \
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index bc40971..36a93b2 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -434,6 +434,9 @@
     mCallbackProcessor->deleteStream();
     mZslProcessor->deleteStream();
 
+    // Remove all ZSL stream state before disconnect; needed to work around b/15408128.
+    mZslProcessor->disconnect();
+
     ALOGV("Camera %d: Disconnecting device", mCameraId);
 
     mDevice->disconnect();
@@ -1088,6 +1091,22 @@
 
     res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
             outputStreams);
+    // try to reconfigure jpeg to video size if configureStreams failed
+    if (res == BAD_VALUE) {
+
+        ALOGV("%s: Camera %d: configure still size to video size before recording"
+                , __FUNCTION__, mCameraId);
+        params.overrideJpegSizeByVideoSize();
+        res = updateProcessorStream(mJpegProcessor, params);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't configure still image size to video size: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
+        res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
+                outputStreams);
+    }
+
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to start recording stream: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
@@ -1127,6 +1146,7 @@
 
     mCameraService->playSound(CameraService::SOUND_RECORDING);
 
+    l.mParameters.recoverOverriddenJpegSize();
     res = startPreviewL(l.mParameters, true);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to return to preview",
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index e7f9a78..8d00590 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -249,6 +249,9 @@
     // TODO: Pick maximum
     pictureWidth = availableJpegSizes[0].width;
     pictureHeight = availableJpegSizes[0].height;
+    pictureWidthLastSet = pictureWidth;
+    pictureHeightLastSet = pictureHeight;
+    pictureSizeOverriden = false;
 
     params.setPictureSize(pictureWidth,
             pictureHeight);
@@ -1381,8 +1384,8 @@
     // PICTURE_SIZE
     newParams.getPictureSize(&validatedParams.pictureWidth,
             &validatedParams.pictureHeight);
-    if (validatedParams.pictureWidth == pictureWidth ||
-            validatedParams.pictureHeight == pictureHeight) {
+    if (validatedParams.pictureWidth != pictureWidth ||
+            validatedParams.pictureHeight != pictureHeight) {
         Vector<Size> availablePictureSizes = getAvailableJpegSizes();
         for (i = 0; i < availablePictureSizes.size(); i++) {
             if ((availablePictureSizes[i].width ==
@@ -1798,6 +1801,7 @@
     /** Update internal parameters */
 
     *this = validatedParams;
+    updateOverriddenJpegSize();
 
     /** Update external parameters calculated from the internal ones */
 
@@ -2115,6 +2119,52 @@
     return OK;
 }
 
+status_t Parameters::overrideJpegSizeByVideoSize() {
+    if (pictureSizeOverriden) {
+        ALOGV("Picture size has been overridden. Skip overriding");
+        return OK;
+    }
+
+    pictureSizeOverriden = true;
+    pictureWidthLastSet = pictureWidth;
+    pictureHeightLastSet = pictureHeight;
+    pictureWidth = videoWidth;
+    pictureHeight = videoHeight;
+    // This change of picture size is invisible to the app layer.
+    // Do not update app-visible params.
+    return OK;
+}
+
+status_t Parameters::updateOverriddenJpegSize() {
+    if (!pictureSizeOverriden) {
+        ALOGV("Picture size has not been overridden. Skip checking");
+        return OK;
+    }
+
+    pictureWidthLastSet = pictureWidth;
+    pictureHeightLastSet = pictureHeight;
+
+    if (pictureWidth <= videoWidth && pictureHeight <= videoHeight) {
+        // Picture size is now smaller than video size. No need to override anymore
+        return recoverOverriddenJpegSize();
+    }
+
+    pictureWidth = videoWidth;
+    pictureHeight = videoHeight;
+
+    return OK;
+}
+
+status_t Parameters::recoverOverriddenJpegSize() {
+    if (!pictureSizeOverriden) {
+        ALOGV("Picture size has not been overridden. Skip recovering");
+        return OK;
+    }
+    pictureSizeOverriden = false;
+    pictureWidth = pictureWidthLastSet;
+    pictureHeight = pictureHeightLastSet;
+    return OK;
+}
 
 const char* Parameters::getStateName(State state) {
 #define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
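
The three new Parameters helpers form a small state machine around pictureSizeOverriden: overrideJpegSizeByVideoSize() saves the app-requested picture size and substitutes the video size, updateOverriddenJpegSize() is called after the app sets new parameters while the override is active and either drops the override (if the new size already fits within the video size) or keeps clamping, and recoverOverriddenJpegSize() restores the last app-requested size. A compact standalone sketch of the same bookkeeping, with hypothetical field names:

    struct JpegSizeOverride {
        int pictureW = 0, pictureH = 0;   // size currently in effect
        int lastSetW = 0, lastSetH = 0;   // last size the app asked for
        bool overridden = false;

        void overrideToVideo(int videoW, int videoH) {
            if (overridden) return;       // already overridden: keep the saved size
            overridden = true;
            lastSetW = pictureW; lastSetH = pictureH;
            pictureW = videoW;   pictureH = videoH;
        }

        // Called after the app-requested size has been written into pictureW/H.
        void onAppSetSize(int videoW, int videoH) {
            if (!overridden) return;
            lastSetW = pictureW; lastSetH = pictureH;   // remember the new request
            if (pictureW <= videoW && pictureH <= videoH) { recover(); return; }
            pictureW = videoW; pictureH = videoH;       // keep clamping to video size
        }

        void recover() {
            if (!overridden) return;
            overridden = false;
            pictureW = lastSetW; pictureH = lastSetH;
        }
    };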
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index d9d33c4..5e6e6ab 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -52,6 +52,9 @@
     int previewTransform; // set by CAMERA_CMD_SET_DISPLAY_ORIENTATION
 
     int pictureWidth, pictureHeight;
+    // Store the picture size before it is overridden by video snapshot
+    int pictureWidthLastSet, pictureHeightLastSet;
+    bool pictureSizeOverriden;
 
     int32_t jpegThumbSize[2];
     uint8_t jpegQuality, jpegThumbQuality;
@@ -253,6 +256,12 @@
     // Add/update JPEG entries in metadata
     status_t updateRequestJpeg(CameraMetadata *request) const;
 
+    /* Helper functions to override jpeg size for video snapshot */
+    // Override jpeg size by video size. Called during startRecording.
+    status_t overrideJpegSizeByVideoSize();
+    // Recover overridden jpeg size.  Called during stopRecording.
+    status_t recoverOverriddenJpegSize();
+
     // Calculate the crop region rectangle based on current stream sizes
     struct CropRegion {
         float left;
@@ -348,6 +357,12 @@
     // Get max size (from the size array) that matches the given aspect ratio.
     Size getMaxSizeForRatio(float ratio, const int32_t* sizeArray, size_t count);
 
+    // Helper function for overriding jpeg size for video snapshot
+    // Check if overridden jpeg size needs to be updated after Parameters::set.
+    // The behavior of this function is tailored to the implementation of Parameters::set.
+    // Do not use this function for any other purpose.
+    status_t updateOverriddenJpegSize();
+
     struct StreamConfiguration {
         int32_t format;
         int32_t width;
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 8fb876e..bb72206 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -48,6 +48,7 @@
         mDevice(client->getCameraDevice()),
         mSequencer(sequencer),
         mId(client->getCameraId()),
+        mDeleted(false),
         mZslBufferAvailable(false),
         mZslStreamId(NO_STREAM),
         mZslReprocessStreamId(NO_STREAM),
@@ -62,7 +63,7 @@
 
 ZslProcessor::~ZslProcessor() {
     ALOGV("%s: Exit", __FUNCTION__);
-    deleteStream();
+    disconnect();
 }
 
 void ZslProcessor::onFrameAvailable() {
@@ -153,7 +154,7 @@
                     mId, strerror(-res), res);
             return res;
         }
-        if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
+        if (mDeleted || currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
                 currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
             res = device->deleteReprocessStream(mZslReprocessStreamId);
             if (res != OK) {
@@ -175,6 +176,8 @@
         }
     }
 
+    mDeleted = false;
+
     if (mZslStreamId == NO_STREAM) {
         // Create stream for HAL production
         // TODO: Sort out better way to select resolution for ZSL
@@ -209,6 +212,14 @@
 
 status_t ZslProcessor::deleteStream() {
     ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    // WAR(b/15408128): do not delete stream unless client is being disconnected.
+    mDeleted = true;
+    return OK;
+}
+
+status_t ZslProcessor::disconnect() {
+    ATRACE_CALL();
     status_t res;
 
     Mutex::Autolock l(mInputMutex);
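
To work around b/15408128, ZslProcessor::deleteStream() no longer tears the stream down immediately; it only sets mDeleted, and the actual teardown happens either on the next updateStream() (which recreates the stream) or in the new disconnect() call made before the camera device is disconnected. A standalone sketch of this deferred-delete flag, with hypothetical methods in place of the camera calls:

    #include <mutex>

    // Defer actual stream teardown: mark as deleted, and only really release the
    // resource when the stream is next (re)configured or the owner disconnects.
    class DeferredStream {
    public:
        void markDeleted() {                  // analogous to deleteStream() above
            std::lock_guard<std::mutex> lock(mLock);
            mDeleted = true;
        }
        void configure() {                    // analogous to updateStream()
            std::lock_guard<std::mutex> lock(mLock);
            if (mDeleted && mHaveStream) {
                releaseLocked();              // old stream is stale: drop it first
            }
            mDeleted = false;
            mHaveStream = true;               // (re)create the stream here
        }
        void disconnect() {                   // analogous to disconnect() above
            std::lock_guard<std::mutex> lock(mLock);
            if (mHaveStream) releaseLocked();
        }
    private:
        void releaseLocked() { mHaveStream = false; }  // real code deletes the stream
        std::mutex mLock;
        bool mDeleted = false;
        bool mHaveStream = false;
    };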
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
index f4cf0c8..b6533cf 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -67,6 +67,7 @@
 
     status_t updateStream(const Parameters &params);
     status_t deleteStream();
+    status_t disconnect();
     int getStreamId() const;
 
     status_t pushToReprocess(int32_t requestId);
@@ -86,6 +87,8 @@
     wp<CaptureSequencer> mSequencer;
     int mId;
 
+    bool mDeleted;
+
     mutable Mutex mInputMutex;
     bool mZslBufferAvailable;
     Condition mZslBufferAvailableSignal;
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
new file mode 100644
index 0000000..9efeaba
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ZslProcessorInterface.h"
+
+namespace android {
+namespace camera2 {
+
+status_t ZslProcessorInterface::disconnect() {
+    return OK;
+}
+
+}; //namespace camera2
+}; //namespace android
+
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
index 183c0c2..9e266e7 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
@@ -19,6 +19,8 @@
 
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
 
 namespace android {
 namespace camera2 {
@@ -37,6 +39,9 @@
     // Delete the underlying CameraDevice streams
     virtual status_t deleteStream() = 0;
 
+    // Clear any additional state necessary before the CameraDevice is disconnected
+    virtual status_t disconnect();
+
     /**
      * Submits a ZSL capture request (id = requestId)
      *
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6f78db5..fafe349 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -601,10 +601,18 @@
 
     if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
         res = configureStreamsLocked();
+        // Stream configuration failed due to an unsupported configuration.
+        // The device is back in the unconfigured state; the client might try other configurations.
+        if (res == BAD_VALUE && mStatus == STATUS_UNCONFIGURED) {
+            CLOGE("No streams configured");
+            return NULL;
+        }
+        // Stream configuration failed for another reason. Fatal.
         if (res != OK) {
             SET_ERR_L("Can't set up streams: %s (%d)", strerror(-res), res);
             return NULL;
         }
+        // Stream configuration succeeded, but resulted in an empty stream configuration.
         if (mStatus == STATUS_UNCONFIGURED) {
             CLOGE("No streams configured");
             return NULL;
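
The Camera3Device change above distinguishes two failure modes of configureStreamsLocked(): BAD_VALUE while the device has dropped back to the unconfigured state is treated as a recoverable client error (the client may retry with a different configuration), whereas any other failure is escalated as a fatal device error. A standalone sketch of that triage, with hypothetical names and an assumed errno-style BAD_VALUE:

    enum class DeviceStatus { Unconfigured, Configured };
    enum class ConfigResult { Ok, Recoverable, Fatal };

    // Classify a stream-configuration result the way the change above does:
    // BAD_VALUE with the device back in the unconfigured state is recoverable,
    // any other non-OK result is fatal.
    ConfigResult classifyConfigureResult(int res, DeviceStatus status) {
        const int kOk = 0;
        const int kBadValue = -22;   // assumed value of BAD_VALUE (-EINVAL)
        if (res == kBadValue && status == DeviceStatus::Unconfigured) {
            return ConfigResult::Recoverable;
        }
        if (res != kOk) {
            return ConfigResult::Fatal;
        }
        return ConfigResult::Ok;
    }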
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 29ce38c..3c0e908 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -233,8 +233,7 @@
     camera3_stream::usage = oldUsage;
     camera3_stream::max_buffers = oldMaxBuffers;
 
-    mState = STATE_CONSTRUCTED;
-
+    mState = (mState == STATE_IN_RECONFIG) ? STATE_CONFIGURED : STATE_CONSTRUCTED;
     return OK;
 }