Merge "stagefright: add adaptive playback support to SoftMPEG decoder." into lmp-dev
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index a3cc396..72e51f9 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -430,7 +430,7 @@
      *  - NO_ERROR: successful operation
      *  - BAD_VALUE:  position is NULL
      */
-            status_t    getPosition(uint32_t *position) const;
+            status_t    getPosition(uint32_t *position);
 
     /* For static buffer mode only, this returns the current playback position in frames
      * relative to start of buffer.  It is analogous to the position units used by
@@ -581,6 +581,7 @@
      * if you need a high resolution mapping between frame position and presentation time,
      * consider implementing that at application level, based on the low resolution timestamps.
      * Returns NO_ERROR if timestamp is valid.
+     * The timestamp parameter is undefined on return if status is not NO_ERROR.
      */
             status_t    getTimestamp(AudioTimestamp& timestamp);
 
@@ -639,7 +640,7 @@
 
             // caller must hold lock on mLock for all _l methods
 
-            status_t createTrack_l(size_t epoch);
+            status_t createTrack_l();
 
             // can only be called when mState != STATE_ACTIVE
             void flush_l();
@@ -659,6 +660,9 @@
             bool     isDirect_l() const
                 { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
 
+            // increment mPosition by the delta of mServer, and return new value of mPosition
+            uint32_t updateAndGetPosition_l();
+
     // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
@@ -731,6 +735,18 @@
     bool                    mMarkerReached;
     uint32_t                mNewPosition;           // in frames
     uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
+    uint32_t                mServer;                // in frames, last known mProxy->getPosition()
+                                                    // which is count of frames consumed by server,
+                                                    // reset by new IAudioTrack,
+                                                    // whether it is reset by stop() is TBD
+    uint32_t                mPosition;              // in frames, like mServer except continues
+                                                    // monotonically after new IAudioTrack,
+                                                    // and could be easily widened to uint64_t
+    uint32_t                mReleased;              // in frames, count of frames released to server
+                                                    // but not necessarily consumed by server,
+                                                    // reset by stop() but continues monotonically
+                                                    // after new IAudioTrack to restore mPosition,
+                                                    // and could be easily widened to uint64_t
 
     audio_output_flags_t    mFlags;
         // const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD.
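
A minimal standalone sketch of how the three counters documented above are meant to interact, assuming only what the comments state: mServer mirrors the raw proxy position, mPosition folds in non-negative deltas so it stays monotonic across IAudioTrack re-creation, and mReleased is used to restore mPosition after a re-create. TrackCounters and its method names are made up for illustration; this is not the AOSP class.

```cpp
#include <cstdint>
#include <cstdio>

class TrackCounters {
public:
    void releaseFrames(uint32_t count) { mReleased += count; }     // as in releaseBuffer()
    uint32_t updateAndGetPosition(uint32_t proxyPosition) {        // as in updateAndGetPosition_l()
        int32_t delta = static_cast<int32_t>(proxyPosition - mServer);  // wraparound-safe
        mServer = proxyPosition;
        if (delta < 0) delta = 0;          // ignore retrograde motion by the server
        return mPosition += static_cast<uint32_t>(delta);
    }
    void onTrackRecreated() {              // sketch of the restore path: new server count starts at 0
        mServer = 0;
        mPosition = mReleased;             // frames released so far define the client position
    }
private:
    uint32_t mServer = 0, mPosition = 0, mReleased = 0;
};

int main() {
    TrackCounters c;
    c.releaseFrames(2000);                              // client wrote 2000 frames
    std::printf("%u\n", c.updateAndGetPosition(1500));  // 1500: server consumed 1500 of them
    c.onTrackRecreated();                               // underlying track died and came back
    c.releaseFrames(500);                               // client keeps writing after the restore
    std::printf("%u\n", c.updateAndGetPosition(100));   // 2100: position continues monotonically
    return 0;
}
```
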
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index 5c8a484..619ac78 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -88,7 +88,7 @@
     /* Send parameters to the audio hardware */
     virtual status_t    setParameters(const String8& keyValuePairs) = 0;
 
-    /* Return NO_ERROR if timestamp is valid */
+    /* Return NO_ERROR if timestamp is valid.  timestamp is undefined otherwise. */
     virtual status_t    getTimestamp(AudioTimestamp& timestamp) = 0;
 
     /* Signal the playback thread for a change in control block */
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index be0c15b..d422576 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -227,7 +227,7 @@
 
     // Returns NO_ERROR if a timestamp is available.  The timestamp includes the total number
     // of frames presented to an external observer, together with the value of CLOCK_MONOTONIC
-    // as of this presentation count.
+    // as of this presentation count.  The timestamp parameter is undefined if error is returned.
     virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; }
 
 protected:
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index d87e6f5..ff7da83 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -398,7 +398,7 @@
     }
 
     // create the IAudioTrack
-    status = createTrack_l(0 /*epoch*/);
+    status = createTrack_l();
 
     if (status != NO_ERROR) {
         if (mAudioTrackThread != 0) {
@@ -417,6 +417,9 @@
     mMarkerReached = false;
     mNewPosition = 0;
     mUpdatePeriod = 0;
+    mServer = 0;
+    mPosition = 0;
+    mReleased = 0;
     AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
     mSequence = 1;
     mObservedSequence = mSequence;
@@ -443,14 +446,16 @@
     } else {
         mState = STATE_ACTIVE;
     }
+    (void) updateAndGetPosition_l();
     if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
         // reset current position as seen by client to 0
-        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
+        mPosition = 0;
+        mReleased = 0;
         // force refresh of remaining frames by processAudioBuffer() as last
         // write before stop could be partial.
         mRefreshRemaining = true;
     }
-    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    mNewPosition = mPosition + mUpdatePeriod;
     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
 
     sp<AudioTrackThread> t = mAudioTrackThread;
@@ -709,7 +714,7 @@
 {
     // FIXME If setting a loop also sets position to start of loop, then
     //       this is correct.  Otherwise it should be removed.
-    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
     mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
     mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
 }
@@ -751,7 +756,7 @@
     }
 
     AutoMutex lock(mLock);
-    mNewPosition = mProxy->getPosition() + updatePeriod;
+    mNewPosition = updateAndGetPosition_l() + updatePeriod;
     mUpdatePeriod = updatePeriod;
 
     return NO_ERROR;
@@ -791,7 +796,7 @@
     if (mState == STATE_ACTIVE) {
         return INVALID_OPERATION;
     }
-    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
     mLoopPeriod = 0;
     // FIXME Check whether loops and setting position are incompatible in old code.
     // If we use setLoop for both purposes we lose the capability to set the position while looping.
@@ -800,7 +805,7 @@
     return NO_ERROR;
 }
 
-status_t AudioTrack::getPosition(uint32_t *position) const
+status_t AudioTrack::getPosition(uint32_t *position)
 {
     if (position == NULL) {
         return BAD_VALUE;
@@ -823,8 +828,8 @@
         *position = dspFrames;
     } else {
         // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
-        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
-                mProxy->getPosition();
+        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
+                0 : updateAndGetPosition_l();
     }
     return NO_ERROR;
 }
@@ -881,7 +886,7 @@
 // -------------------------------------------------------------------------
 
 // must be called with mLock held
-status_t AudioTrack::createTrack_l(size_t epoch)
+status_t AudioTrack::createTrack_l()
 {
     status_t status;
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
@@ -1184,7 +1189,6 @@
     mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
     mProxy->setSendLevel(mSendLevel);
     mProxy->setSampleRate(mSampleRate);
-    mProxy->setEpoch(epoch);
     mProxy->setMinimum(mNotificationFramesAct);
 
     mDeathNotifier = new DeathNotifier(this);
@@ -1319,6 +1323,7 @@
     buffer.mRaw = audioBuffer->raw;
 
     AutoMutex lock(mLock);
+    mReleased += stepCount;
     mInUnderrun = false;
     mProxy->releaseBuffer(&buffer);
 
@@ -1531,7 +1536,7 @@
     }
 
     // Get current position of server
-    size_t position = mProxy->getPosition();
+    size_t position = updateAndGetPosition_l();
 
     // Manage marker callback
     bool markerReached = false;
@@ -1796,14 +1801,18 @@
         return DEAD_OBJECT;
     }
 
-    // if the new IAudioTrack is created, createTrack_l() will modify the
+    // save the old static buffer position
+    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
+
+    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
     // following member variables: mAudioTrack, mCblkMemory and mCblk.
-    // It will also delete the strong references on previous IAudioTrack and IMemory
+    // It will also delete the strong references on previous IAudioTrack and IMemory.
+    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
+    result = createTrack_l();
 
     // take the frames that will be lost by track recreation into account in saved position
-    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
-    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
-    result = createTrack_l(position /*epoch*/);
+    (void) updateAndGetPosition_l();
+    mPosition = mReleased;
 
     if (result == NO_ERROR) {
         // continue playback from last known position, but
@@ -1838,6 +1847,27 @@
     return result;
 }
 
+uint32_t AudioTrack::updateAndGetPosition_l()
+{
+    // This is the sole place to read server consumed frames
+    uint32_t newServer = mProxy->getPosition();
+    int32_t delta = newServer - mServer;
+    mServer = newServer;
+    // TODO There is controversy about whether there can be "negative jitter" in server position.
+    //      This should be investigated further, and if possible, it should be addressed.
+    //      A more definite failure mode is infrequent polling by client.
+    //      One could call (void) updateAndGetPosition_l() in releaseBuffer(),
+    //      so mReleased and mPosition are always lock-step as best possible.
+    //      That should ensure delta never goes negative for infrequent polling
+    //      unless the server has more than 2^31 frames in its buffer,
+    //      in which case the use of uint32_t for these counters has bigger issues.
+    if (delta < 0) {
+        ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
+        delta = 0;
+    }
+    return mPosition += (uint32_t) delta;
+}
+
 status_t AudioTrack::setParameters(const String8& keyValuePairs)
 {
     AutoMutex lock(mLock);
@@ -1854,9 +1884,34 @@
     if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
         return INVALID_OPERATION;
     }
+    // The presented frame count must always lag behind the consumed frame count.
+    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
     status_t status = mAudioTrack->getTimestamp(timestamp);
     if (status == NO_ERROR) {
-        timestamp.mPosition += mProxy->getEpoch();
+        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
+        (void) updateAndGetPosition_l();
+        // Server consumed (mServer) and presented both use the same server time base,
+        // and server consumed is always >= presented.
+        // The delta between these represents the number of frames in the buffer pipeline.
+        // If this delta is greater than the client position, it means that
+        // actually presented is still stuck at the starting line (figuratively speaking),
+        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
+        if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
+            return INVALID_OPERATION;
+        }
+        // Convert timestamp position from server time base to client time base.
+        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
+        // But if we change it to 64-bit then this could fail.
+        // If (mPosition - mServer) can be negative then should use:
+        //   (int32_t)(mPosition - mServer)
+        timestamp.mPosition += mPosition - mServer;
+        // Immediately after a call to updateAndGetPosition_l(), mPosition and
+        // mServer both represent the same frame position.  mPosition is
+        // in client's point of view, and mServer is in server's point of
+        // view.  So the difference between them is the "fudge factor"
+        // between client and server views due to stop() and/or new
+        // IAudioTrack.  And timestamp.mPosition is initially in server's
+        // point of view, so we need to apply the same fudge factor to it.
     }
     return status;
 }
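
The getTimestamp() change above maps a server-side presented position into the client's time base via the (mPosition - mServer) offset, and declines to report a timestamp while presentation has not yet reached the client's frame 0. A hedged sketch of just that arithmetic, with plain integers standing in for the binder call and the AudioTimestamp struct:

```cpp
#include <cstdint>
#include <cstdio>

struct ClientView {
    uint32_t mServer;    // server-consumed frames, server time base
    uint32_t mPosition;  // client-consumed frames, client time base
};

// Returns false when no valid timestamp can be reported yet.
bool translatePresented(const ClientView& v, uint32_t serverPresented, uint32_t* clientPresented) {
    // Frames still in the pipeline between consumption and presentation:
    uint32_t pipeline = v.mServer - serverPresented;
    if (pipeline > v.mPosition) {
        return false;    // presentation hasn't reached the client's frame 0 yet
    }
    // Shift from server time base to client time base (modular arithmetic keeps this correct).
    *clientPresented = serverPresented + (v.mPosition - v.mServer);
    return true;
}

int main() {
    // After a stop()/restart, the client restarted counting while the server kept going:
    ClientView v = { /*mServer=*/ 5000, /*mPosition=*/ 1000 };
    uint32_t presented;
    std::printf("%d\n", translatePresented(v, 3500, &presented));   // 0: pipeline of 1500 > 1000
    if (translatePresented(v, 4600, &presented)) {
        std::printf("%u\n", presented);                             // 600 in client frames
    }
    return 0;
}
```
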
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index e2bcb1e..b904aa8 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -183,11 +183,7 @@
         return BAD_VALUE;
     }
 
-    if (ve == VIDEO_ENCODER_DEFAULT) {
-        mVideoEncoder = VIDEO_ENCODER_H263;
-    } else {
-        mVideoEncoder = ve;
-    }
+    mVideoEncoder = ve;
 
     return OK;
 }
@@ -1033,6 +1029,7 @@
     if (mAudioSource != AUDIO_SOURCE_CNT) {
         source = createAudioSource();
     } else {
+        setDefaultVideoEncoderIfNecessary();
 
         sp<MediaSource> mediaSource;
         status_t err = setupMediaSource(&mediaSource);
@@ -1074,6 +1071,7 @@
 
     if (mVideoSource < VIDEO_SOURCE_LIST_END) {
         if (mVideoEncoder != VIDEO_ENCODER_H264) {
+            ALOGE("MPEG2TS recording only supports H.264 encoding!");
             return ERROR_UNSUPPORTED;
         }
 
@@ -1108,6 +1106,12 @@
 
 void StagefrightRecorder::clipVideoFrameRate() {
     ALOGV("clipVideoFrameRate: encoder %d", mVideoEncoder);
+    if (mFrameRate == -1) {
+        mFrameRate = mEncoderProfiles->getCamcorderProfileParamByName(
+                "vid.fps", mCameraId, CAMCORDER_QUALITY_LOW);
+        ALOGW("Using default video fps %d", mFrameRate);
+    }
+
     int minFrameRate = mEncoderProfiles->getVideoEncoderParamByName(
                         "enc.vid.fps.min", mVideoEncoder);
     int maxFrameRate = mEncoderProfiles->getVideoEncoderParamByName(
@@ -1243,6 +1247,27 @@
     }
 }
 
+void StagefrightRecorder::setDefaultVideoEncoderIfNecessary() {
+    if (mVideoEncoder == VIDEO_ENCODER_DEFAULT) {
+        if (mOutputFormat == OUTPUT_FORMAT_WEBM) {
+            // default to VP8 for WEBM recording
+            mVideoEncoder = VIDEO_ENCODER_VP8;
+        } else {
+            // pick the default encoder for CAMCORDER_QUALITY_LOW
+            int videoCodec = mEncoderProfiles->getCamcorderProfileParamByName(
+                    "vid.codec", mCameraId, CAMCORDER_QUALITY_LOW);
+
+            if (videoCodec > VIDEO_ENCODER_DEFAULT &&
+                videoCodec < VIDEO_ENCODER_LIST_END) {
+                mVideoEncoder = (video_encoder)videoCodec;
+            } else {
+                // default to H.264 if camcorder profile not available
+                mVideoEncoder = VIDEO_ENCODER_H264;
+            }
+        }
+    }
+}
+
 status_t StagefrightRecorder::checkAudioEncoderCapabilities() {
     clipAudioBitRate();
     clipAudioSampleRate();
@@ -1562,6 +1587,7 @@
     }
 
     if (mVideoSource < VIDEO_SOURCE_LIST_END) {
+        setDefaultVideoEncoderIfNecessary();
 
         sp<MediaSource> mediaSource;
         err = setupMediaSource(&mediaSource);
@@ -1721,7 +1747,7 @@
     // Default parameters
     mOutputFormat  = OUTPUT_FORMAT_THREE_GPP;
     mAudioEncoder  = AUDIO_ENCODER_AMR_NB;
-    mVideoEncoder  = VIDEO_ENCODER_H263;
+    mVideoEncoder  = VIDEO_ENCODER_DEFAULT;
     mVideoWidth    = 176;
     mVideoHeight   = 144;
     mFrameRate     = -1;
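
The new setDefaultVideoEncoderIfNecessary() falls back in a fixed order: an explicitly chosen encoder wins, WebM output forces VP8, otherwise the CAMCORDER_QUALITY_LOW profile's codec is used, with H.264 as the last resort. A self-contained sketch of that decision, where the enums and lookupProfileCodec() are simplified stand-ins for video_encoder, output_format and MediaProfiles::getCamcorderProfileParamByName():

```cpp
#include <cstdio>

enum VideoEncoder { ENC_DEFAULT = 0, ENC_H263, ENC_H264, ENC_MPEG4, ENC_VP8, ENC_LIST_END };
enum OutputFormat { FMT_THREE_GPP, FMT_MPEG4, FMT_WEBM };

// Hypothetical stand-in for the "vid.codec" camcorder-profile lookup.
int lookupProfileCodec(int cameraId) { (void)cameraId; return ENC_MPEG4; }

VideoEncoder pickDefaultEncoder(VideoEncoder requested, OutputFormat fmt, int cameraId) {
    if (requested != ENC_DEFAULT) return requested;          // caller chose explicitly
    if (fmt == FMT_WEBM) return ENC_VP8;                     // WebM container => VP8
    int codec = lookupProfileCodec(cameraId);                // prefer the camcorder profile
    if (codec > ENC_DEFAULT && codec < ENC_LIST_END) return static_cast<VideoEncoder>(codec);
    return ENC_H264;                                         // last-resort default
}

int main() {
    std::printf("%d\n", pickDefaultEncoder(ENC_DEFAULT, FMT_WEBM, 0));       // 4 (VP8)
    std::printf("%d\n", pickDefaultEncoder(ENC_DEFAULT, FMT_THREE_GPP, 0));  // 3 (profile codec)
    std::printf("%d\n", pickDefaultEncoder(ENC_H263, FMT_WEBM, 0));          // 1 (explicit wins)
    return 0;
}
```
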
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 9062f30..54c38d3 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -178,6 +178,7 @@
     void clipAudioSampleRate();
     void clipNumberOfAudioChannels();
     void setDefaultProfileIfNecessary();
+    void setDefaultVideoEncoderIfNecessary();
 
 
     StagefrightRecorder(const StagefrightRecorder &);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 8e1987a..d8ed836 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -54,7 +54,8 @@
       mDrmManagerClient(NULL),
       mMetaDataSize(-1ll),
       mBitrate(-1ll),
-      mPollBufferingGeneration(0) {
+      mPollBufferingGeneration(0),
+      mPendingReadBufferTypes(0) {
     resetDataSource();
     DataSource::RegisterDefaultSniffers();
 }
@@ -169,6 +170,8 @@
             if (mAudioTrack.mSource == NULL) {
                 mAudioTrack.mIndex = i;
                 mAudioTrack.mSource = track;
+                mAudioTrack.mPackets =
+                    new AnotherPacketSource(mAudioTrack.mSource->getFormat());
 
                 if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
                     mAudioIsVorbis = true;
@@ -180,6 +183,8 @@
             if (mVideoTrack.mSource == NULL) {
                 mVideoTrack.mIndex = i;
                 mVideoTrack.mSource = track;
+                mVideoTrack.mPackets =
+                    new AnotherPacketSource(mVideoTrack.mSource->getFormat());
 
                 // check if the source requires secure buffers
                 int32_t secure;
@@ -427,16 +432,12 @@
 
     if (mAudioTrack.mSource != NULL) {
         CHECK_EQ(mAudioTrack.mSource->start(), (status_t)OK);
-        mAudioTrack.mPackets =
-            new AnotherPacketSource(mAudioTrack.mSource->getFormat());
 
         postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
     }
 
     if (mVideoTrack.mSource != NULL) {
         CHECK_EQ(mVideoTrack.mSource->start(), (status_t)OK);
-        mVideoTrack.mPackets =
-            new AnotherPacketSource(mVideoTrack.mSource->getFormat());
 
         postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
     }
@@ -1148,15 +1149,27 @@
 }
 
 void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) {
-    sp<AMessage> msg = new AMessage(kWhatReadBuffer, id());
-    msg->setInt32("trackType", trackType);
-    msg->post();
+    Mutex::Autolock _l(mReadBufferLock);
+
+    if ((mPendingReadBufferTypes & (1 << trackType)) == 0) {
+        mPendingReadBufferTypes |= (1 << trackType);
+        sp<AMessage> msg = new AMessage(kWhatReadBuffer, id());
+        msg->setInt32("trackType", trackType);
+        msg->post();
+    }
 }
 
 void NuPlayer::GenericSource::onReadBuffer(sp<AMessage> msg) {
     int32_t tmpType;
     CHECK(msg->findInt32("trackType", &tmpType));
     media_track_type trackType = (media_track_type)tmpType;
+    {
+        // only protect the variable change, as readBuffer may
+        // take considerable time.  This may result in one extra
+        // read being processed, but that is benign.
+        Mutex::Autolock _l(mReadBufferLock);
+        mPendingReadBufferTypes &= ~(1 << trackType);
+    }
     readBuffer(trackType);
 }
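
postReadBuffer()/onReadBuffer() now keep a per-track "pending" bit under mReadBufferLock so that at most one read request per track type is in flight, and the bit is cleared before the potentially slow readBuffer() call, as the in-code comment explains. A small sketch of the same dedup pattern, using std::mutex and a plain queue in place of Mutex and the ALooper message:

```cpp
#include <cstdint>
#include <iostream>
#include <mutex>
#include <queue>

enum TrackType { TRACK_AUDIO = 1, TRACK_VIDEO = 2 };   // illustrative values

class ReadScheduler {
public:
    void postRead(TrackType t) {
        std::lock_guard<std::mutex> lock(mLock);
        if ((mPending & (1u << t)) == 0) {   // only one outstanding request per track
            mPending |= (1u << t);
            mQueue.push(t);
        }
    }
    void drain() {
        while (!mQueue.empty()) {
            TrackType t = mQueue.front(); mQueue.pop();
            {   // clear the bit before the (potentially slow) read, as in onReadBuffer()
                std::lock_guard<std::mutex> lock(mLock);
                mPending &= ~(1u << t);
            }
            std::cout << "readBuffer(" << t << ")\n";
        }
    }
private:
    std::mutex mLock;
    uint32_t mPending = 0;
    std::queue<TrackType> mQueue;
};

int main() {
    ReadScheduler s;
    s.postRead(TRACK_AUDIO);
    s.postRead(TRACK_AUDIO);   // coalesced with the first post
    s.postRead(TRACK_VIDEO);
    s.drain();                 // performs two reads, not three
    return 0;
}
```
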
 
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 50ff98a..c70c48e 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -131,6 +131,8 @@
     off64_t mMetaDataSize;
     int64_t mBitrate;
     int32_t mPollBufferingGeneration;
+    uint32_t mPendingReadBufferTypes;
+    mutable Mutex mReadBufferLock;
 
     sp<ALooper> mLooper;
 
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 7b18348..83481bc 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -498,7 +498,7 @@
                 break;
             }
 
-            onCheckBandwidth();
+            onCheckBandwidth(msg);
             break;
         }
 
@@ -531,6 +531,19 @@
             onSwapped(msg);
             break;
         }
+
+        case kWhatCheckSwitchDown:
+        {
+            onCheckSwitchDown();
+            break;
+        }
+
+        case kWhatSwitchDown:
+        {
+            onSwitchDown();
+            break;
+        }
+
         default:
             TRESPASS();
             break;
@@ -643,6 +656,9 @@
     // (finishDisconnect, onFinishDisconnect2)
     cancelBandwidthSwitch();
 
+    // cancel switch down monitor
+    mSwitchDownMonitor.clear();
+
     for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
         mFetcherInfos.valueAt(i).mFetcher->stopAsync();
     }
@@ -919,14 +935,22 @@
             }
         }
 
-        // Consider only 80% of the available bandwidth usable.
-        bandwidthBps = (bandwidthBps * 8) / 10;
-
         // Pick the highest bandwidth stream below or equal to estimated bandwidth.
 
         index = mBandwidthItems.size() - 1;
-        while (index > 0 && mBandwidthItems.itemAt(index).mBandwidth
-                                > (size_t)bandwidthBps) {
+        while (index > 0) {
+            // consider only 80% of the available bandwidth, but if we are switching up,
+            // be even more conservative (70%) to avoid overestimating and immediately
+            // switching back.
+            size_t adjustedBandwidthBps = bandwidthBps;
+            if (index > mCurBandwidthIndex) {
+                adjustedBandwidthBps = adjustedBandwidthBps * 7 / 10;
+            } else {
+                adjustedBandwidthBps = adjustedBandwidthBps * 8 / 10;
+            }
+            if (mBandwidthItems.itemAt(index).mBandwidth <= adjustedBandwidthBps) {
+                break;
+            }
             --index;
         }
     }
@@ -1394,6 +1418,7 @@
     // All fetchers have now been started, the configuration change
     // has completed.
 
+    cancelCheckBandwidthEvent();
     scheduleCheckBandwidthEvent();
 
     ALOGV("XXX configuration change completed.");
@@ -1435,6 +1460,44 @@
     tryToFinishBandwidthSwitch();
 }
 
+void LiveSession::onCheckSwitchDown() {
+    if (mSwitchDownMonitor == NULL) {
+        return;
+    }
+
+    for (size_t i = 0; i < kMaxStreams; ++i) {
+        int32_t targetDuration;
+        sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(indexToType(i));
+        sp<AMessage> meta = packetSource->getLatestDequeuedMeta();
+
+        if (meta != NULL && meta->findInt32("targetDuration", &targetDuration) ) {
+            int64_t bufferedDurationUs = packetSource->getEstimatedDurationUs();
+            int64_t targetDurationUs = targetDuration * 1000000ll;
+
+            if (bufferedDurationUs < targetDurationUs / 3) {
+                (new AMessage(kWhatSwitchDown, id()))->post();
+                break;
+            }
+        }
+    }
+
+    mSwitchDownMonitor->post(1000000ll);
+}
+
+void LiveSession::onSwitchDown() {
+    if (mReconfigurationInProgress || mSwitchInProgress || mCurBandwidthIndex == 0) {
+        return;
+    }
+
+    ssize_t bandwidthIndex = getBandwidthIndex();
+    if (bandwidthIndex < mCurBandwidthIndex) {
+        changeConfiguration(-1, bandwidthIndex, false);
+        return;
+    }
+
+    changeConfiguration(-1, mCurBandwidthIndex - 1, false);
+}
+
 // Mark switch done when:
 //   1. all old buffers are swapped out
 void LiveSession::tryToFinishBandwidthSwitch() {
@@ -1492,20 +1555,16 @@
     }
 }
 
-void LiveSession::onCheckBandwidth() {
+void LiveSession::onCheckBandwidth(const sp<AMessage> &msg) {
     size_t bandwidthIndex = getBandwidthIndex();
     if (canSwitchBandwidthTo(bandwidthIndex)) {
         changeConfiguration(-1ll /* timeUs */, bandwidthIndex);
     } else {
-        scheduleCheckBandwidthEvent();
+        // Come back and check again 10 seconds later in case there is nothing to do now.
+        // If we DO change configuration, once that completes it'll schedule a new
+        // check bandwidth event with an incremented mCheckBandwidthGeneration.
+        msg->post(10000000ll);
     }
-
-    // Handling the kWhatCheckBandwidth even here does _not_ automatically
-    // schedule another one on return, only an explicit call to
-    // scheduleCheckBandwidthEvent will do that.
-    // This ensures that only one configuration change is ongoing at any
-    // one time, once that completes it'll schedule another check bandwidth
-    // event.
 }
 
 void LiveSession::postPrepared(status_t err) {
@@ -1522,6 +1581,9 @@
     notify->post();
 
     mInPreparationPhase = false;
+
+    mSwitchDownMonitor = new AMessage(kWhatCheckSwitchDown, id());
+    mSwitchDownMonitor->post();
 }
 
 }  // namespace android
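
Two related behaviors are added above: getBandwidthIndex() now applies asymmetric headroom (an up-switch must fit within 70% of the measured bandwidth, any other choice within 80%), and a one-second switch-down monitor forces a drop when a track's buffered duration falls below a third of the playlist's target duration. The headroom rule is the arithmetic part; here is a standalone sketch of it with plain bit rates in place of BandwidthItem:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// Bandwidth tiers sorted ascending, as in LiveSession::mBandwidthItems (assumed non-empty).
size_t pickIndex(const std::vector<size_t>& itemsBps, size_t currentIndex, size_t measuredBps) {
    size_t index = itemsBps.size() - 1;
    while (index > 0) {
        size_t adjusted = (index > currentIndex) ? measuredBps * 7 / 10   // switching up: 70%
                                                 : measuredBps * 8 / 10;  // otherwise: 80%
        if (itemsBps[index] <= adjusted) break;
        --index;
    }
    return index;
}

int main() {
    std::vector<size_t> items = {200000, 600000, 1200000, 2500000};
    // 1.6 Mbps measured while on index 1: the 1.2 Mbps tier would need <= 1.12 Mbps, so stay at 1.
    std::printf("%zu\n", pickIndex(items, 1, 1600000));  // 1
    // Same measurement while already on index 2: 1.2 Mbps <= 1.28 Mbps, so index 2 is kept.
    std::printf("%zu\n", pickIndex(items, 2, 1600000));  // 2
    return 0;
}
```
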
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 5423f0f..8a800da 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -108,6 +108,8 @@
         kWhatChangeConfiguration3       = 'chC3',
         kWhatFinishDisconnect2          = 'fin2',
         kWhatSwapped                    = 'swap',
+        kWhatCheckSwitchDown            = 'ckSD',
+        kWhatSwitchDown                 = 'sDwn',
     };
 
     struct BandwidthItem {
@@ -202,6 +204,7 @@
     bool mFirstTimeUsValid;
     int64_t mFirstTimeUs;
     int64_t mLastSeekTimeUs;
+    sp<AMessage> mSwitchDownMonitor;
     KeyedVector<size_t, int64_t> mDiscontinuityAbsStartTimesUs;
     KeyedVector<size_t, int64_t> mDiscontinuityOffsetTimesUs;
 
@@ -246,6 +249,8 @@
     void onChangeConfiguration2(const sp<AMessage> &msg);
     void onChangeConfiguration3(const sp<AMessage> &msg);
     void onSwapped(const sp<AMessage> &msg);
+    void onCheckSwitchDown();
+    void onSwitchDown();
     void tryToFinishBandwidthSwitch();
 
     void scheduleCheckBandwidthEvent();
@@ -257,7 +262,7 @@
     void cancelBandwidthSwitch();
 
     bool canSwitchBandwidthTo(size_t bandwidthIndex);
-    void onCheckBandwidth();
+    void onCheckBandwidth(const sp<AMessage> &msg);
 
     void finishDisconnect();
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 82a4c39..3ef0f06 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -737,12 +737,6 @@
     const int32_t lastSeqNumberInPlaylist =
         firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
 
-    if (mStartup && mSeqNumber >= 0
-            && (mSeqNumber < firstSeqNumberInPlaylist || mSeqNumber > lastSeqNumberInPlaylist)) {
-        // in case we guessed wrong during reconfiguration, try fetching the latest content.
-        mSeqNumber = lastSeqNumberInPlaylist;
-    }
-
     if (mDiscontinuitySeq < 0) {
         mDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
     }
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 010063f..c74c3e7 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -42,7 +42,8 @@
       mLastQueuedTimeUs(0),
       mEOSResult(OK),
       mLatestEnqueuedMeta(NULL),
-      mLatestDequeuedMeta(NULL) {
+      mLatestDequeuedMeta(NULL),
+      mQueuedDiscontinuityCount(0) {
     setFormat(meta);
 }
 
@@ -122,6 +123,7 @@
                 mFormat.clear();
             }
 
+            --mQueuedDiscontinuityCount;
             return INFO_DISCONTINUITY;
         }
 
@@ -210,6 +212,11 @@
     mBuffers.push_back(buffer);
     mCondition.signal();
 
+    int32_t discontinuity;
+    if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
+        ++mQueuedDiscontinuityCount;
+    }
+
     if (mLatestEnqueuedMeta == NULL) {
         mLatestEnqueuedMeta = buffer->meta();
     } else {
@@ -226,6 +233,7 @@
 
     mBuffers.clear();
     mEOSResult = OK;
+    mQueuedDiscontinuityCount = 0;
 
     mFormat = NULL;
     mLatestEnqueuedMeta = NULL;
@@ -262,6 +270,7 @@
     mEOSResult = OK;
     mLastQueuedTimeUs = 0;
     mLatestEnqueuedMeta = NULL;
+    ++mQueuedDiscontinuityCount;
 
     sp<ABuffer> buffer = new ABuffer(0);
     buffer->meta()->setInt32("discontinuity", static_cast<int32_t>(type));
@@ -291,7 +300,10 @@
 
 int64_t AnotherPacketSource::getBufferedDurationUs(status_t *finalResult) {
     Mutex::Autolock autoLock(mLock);
+    return getBufferedDurationUs_l(finalResult);
+}
 
+int64_t AnotherPacketSource::getBufferedDurationUs_l(status_t *finalResult) {
     *finalResult = mEOSResult;
 
     if (mBuffers.empty()) {
@@ -300,6 +312,7 @@
 
     int64_t time1 = -1;
     int64_t time2 = -1;
+    int64_t durationUs = 0;
 
     List<sp<ABuffer> >::iterator it = mBuffers.begin();
     while (it != mBuffers.end()) {
@@ -307,20 +320,64 @@
 
         int64_t timeUs;
         if (buffer->meta()->findInt64("timeUs", &timeUs)) {
-            if (time1 < 0) {
+            if (time1 < 0 || timeUs < time1) {
                 time1 = timeUs;
             }
 
-            time2 = timeUs;
+            if (time2 < 0 || timeUs > time2) {
+                time2 = timeUs;
+            }
         } else {
             // This is a discontinuity, reset everything.
+            durationUs += time2 - time1;
             time1 = time2 = -1;
         }
 
         ++it;
     }
 
-    return time2 - time1;
+    return durationUs + (time2 - time1);
+}
+
+// A cheaper but less precise version of getBufferedDurationUs that we would like to use in
+// LiveSession::dequeueAccessUnit to trigger downwards adaptation.
+int64_t AnotherPacketSource::getEstimatedDurationUs() {
+    Mutex::Autolock autoLock(mLock);
+    if (mBuffers.empty()) {
+        return 0;
+    }
+
+    if (mQueuedDiscontinuityCount > 0) {
+        status_t finalResult;
+        return getBufferedDurationUs_l(&finalResult);
+    }
+
+    List<sp<ABuffer> >::iterator it = mBuffers.begin();
+    sp<ABuffer> buffer = *it;
+
+    int64_t startTimeUs;
+    buffer->meta()->findInt64("timeUs", &startTimeUs);
+    if (startTimeUs < 0) {
+        return 0;
+    }
+
+    it = mBuffers.end();
+    --it;
+    buffer = *it;
+
+    int64_t endTimeUs;
+    buffer->meta()->findInt64("timeUs", &endTimeUs);
+    if (endTimeUs < 0) {
+        return 0;
+    }
+
+    int64_t diffUs;
+    if (endTimeUs > startTimeUs) {
+        diffUs = endTimeUs - startTimeUs;
+    } else {
+        diffUs = startTimeUs - endTimeUs;
+    }
+    return diffUs;
 }
 
 status_t AnotherPacketSource::nextBufferTime(int64_t *timeUs) {
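
getBufferedDurationUs_l() now sums every segment between discontinuities (tracking per-segment min and max timestamps) instead of keeping only the last one, and getEstimatedDurationUs() is the cheap first-vs-last approximation used when no discontinuity is queued. A re-derivation of the summation with plain structs in place of ABuffer/AMessage:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

struct Unit { bool discontinuity; int64_t timeUs; };  // timeUs ignored for discontinuities

int64_t bufferedDurationUs(const std::vector<Unit>& buffers) {
    int64_t time1 = -1, time2 = -1, durationUs = 0;
    for (const Unit& u : buffers) {
        if (!u.discontinuity) {
            if (time1 < 0 || u.timeUs < time1) time1 = u.timeUs;  // segment minimum
            if (time2 < 0 || u.timeUs > time2) time2 = u.timeUs;  // segment maximum
        } else {
            durationUs += time2 - time1;   // close the current segment
            time1 = time2 = -1;
        }
    }
    return durationUs + (time2 - time1);   // plus the still-open segment
}

int main() {
    std::vector<Unit> q = {
        {false, 0}, {false, 40000}, {false, 80000},   // 80 ms segment
        {true, 0},                                    // discontinuity
        {false, 5000000}, {false, 5040000},           // 40 ms segment
    };
    std::printf("%lld us\n", (long long) bufferedDurationUs(q));  // 120000 us
    return 0;
}
```
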
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index 0c717d7..809a858 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -49,6 +49,8 @@
     // presentation timestamps since the last discontinuity (if any).
     int64_t getBufferedDurationUs(status_t *finalResult);
 
+    int64_t getEstimatedDurationUs();
+
     status_t nextBufferTime(int64_t *timeUs);
 
     void queueAccessUnit(const sp<ABuffer> &buffer);
@@ -83,7 +85,10 @@
     sp<AMessage> mLatestEnqueuedMeta;
     sp<AMessage> mLatestDequeuedMeta;
 
+    size_t  mQueuedDiscontinuityCount;
+
     bool wasFormatChange(int32_t discontinuityType) const;
+    int64_t getBufferedDurationUs_l(status_t *finalResult);
 
     DISALLOW_EVIL_CONSTRUCTORS(AnotherPacketSource);
 };
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 6adcde4..22c4e04 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -64,6 +64,7 @@
 const StringToEnum sDeviceNameToEnumTable[] = {
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
@@ -3824,6 +3825,14 @@
             break;
         }
     }
+
+    /*Filter SPEAKER_SAFE out of results, as AudioService doesn't know about it
+      and doesn't really need to.*/
+    if (devices & AUDIO_DEVICE_OUT_SPEAKER_SAFE) {
+        devices |= AUDIO_DEVICE_OUT_SPEAKER;
+        devices &= ~AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+    }
+
     return devices;
 }
 
@@ -3926,12 +3935,20 @@
             //   the isStreamActive() method only informs about the activity of a stream, not
             //   if it's for local playback. Note also that we use the same delay between both tests
             device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
+            //user "safe" speaker if available instead of normal speaker to avoid triggering
+            //other acoustic safety mechanisms for notification
+            if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
+                device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
         } else if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
             // while media is playing (or has recently played), use the same device
             device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
         } else {
             // when media is not playing anymore, fall back on the sonification behavior
             device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
+            //user "safe" speaker if available instead of normal speaker to avoid triggering
+            //other acoustic safety mechanisms for notification
+            if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
+                device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
         }
 
         break;
@@ -4668,6 +4685,10 @@
         }
     }
 
+    /*SPEAKER_SAFE is an alias of SPEAKER for purposes of volume control*/
+    if (device == AUDIO_DEVICE_OUT_SPEAKER_SAFE)
+        device = AUDIO_DEVICE_OUT_SPEAKER;
+
     ALOGW_IF(popcount(device) != 1,
             "getDeviceForVolume() invalid device combination: %08x",
             device);
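
The SPEAKER_SAFE handling above follows one rule in two places: report it to AudioService as plain SPEAKER, and use SPEAKER's volume curves for it. A compact sketch of both bit manipulations; the constants mirror the audio_devices_t pattern but are illustrative here:

```cpp
#include <cstdint>
#include <cstdio>

constexpr uint32_t OUT_SPEAKER       = 0x2;
constexpr uint32_t OUT_WIRED_HEADSET = 0x4;
constexpr uint32_t OUT_SPEAKER_SAFE  = 0x400000;  // placeholder bit

uint32_t filterForAudioService(uint32_t devices) {
    if (devices & OUT_SPEAKER_SAFE) {         // AudioService doesn't know SPEAKER_SAFE
        devices |= OUT_SPEAKER;
        devices &= ~OUT_SPEAKER_SAFE;
    }
    return devices;
}

uint32_t deviceForVolume(uint32_t device) {
    // SPEAKER_SAFE shares the speaker volume curves.
    return (device == OUT_SPEAKER_SAFE) ? OUT_SPEAKER : device;
}

int main() {
    std::printf("%#x\n", filterForAudioService(OUT_SPEAKER_SAFE | OUT_WIRED_HEADSET)); // 0x6
    std::printf("%#x\n", deviceForVolume(OUT_SPEAKER_SAFE));                           // 0x2
    return 0;
}
```
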
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 36a93b2..6f4a507 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1291,6 +1291,9 @@
 
             return OK;
         }
+        if (l.mParameters.zslMode) {
+            mZslProcessor->clearZslQueue();
+        }
     }
     syncWithDevice();
 
@@ -1379,8 +1382,14 @@
 
     SharedParameters::Lock l(mParameters);
 
+    Parameters::focusMode_t focusModeBefore = l.mParameters.focusMode;
     res = l.mParameters.set(params);
     if (res != OK) return res;
+    Parameters::focusMode_t focusModeAfter = l.mParameters.focusMode;
+
+    if (l.mParameters.zslMode && focusModeAfter != focusModeBefore) {
+        mZslProcessor->clearZslQueue();
+    }
 
     res = updateRequests(l.mParameters);
 
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index 2d31275..fa65b74 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -44,6 +44,7 @@
     sp<Camera2Client> client,
     wp<CaptureSequencer> sequencer):
         Thread(false),
+        mLatestClearedBufferTimestamp(0),
         mState(RUNNING),
         mClient(client),
         mSequencer(sequencer),
@@ -107,7 +108,6 @@
         ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__);
         return;
     }
-    (void)timestamp;
 
     entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
     if (entry.count == 0) {
@@ -120,6 +120,9 @@
 
     if (mState != RUNNING) return;
 
+    // Corresponding buffer has been cleared. No need to push into mFrameList
+    if (timestamp <= mLatestClearedBufferTimestamp) return;
+
     mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
     mFrameListHead = (mFrameListHead + 1) % mFrameListDepth;
 }
@@ -392,7 +395,7 @@
     if (mZslStream != 0) {
         // clear result metadata list first.
         clearZslResultQueueLocked();
-        return mZslStream->clearInputRingBuffer();
+        return mZslStream->clearInputRingBuffer(&mLatestClearedBufferTimestamp);
     }
     return OK;
 }
@@ -454,6 +457,23 @@
     }
 }
 
+bool ZslProcessor3::isFixedFocusMode(uint8_t afMode) const {
+    switch (afMode) {
+        case ANDROID_CONTROL_AF_MODE_AUTO:
+        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+        case ANDROID_CONTROL_AF_MODE_MACRO:
+            return false;
+            break;
+        case ANDROID_CONTROL_AF_MODE_OFF:
+        case ANDROID_CONTROL_AF_MODE_EDOF:
+            return true;
+        default:
+            ALOGE("%s: unknown focus mode %d", __FUNCTION__, afMode);
+            return false;
+    }
+}
+
 nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const {
     /**
      * Find the smallest timestamp we know about so far
@@ -499,8 +519,16 @@
                     continue;
                 }
 
-                // Check AF state if device has focuser
-                if (mHasFocuser) {
+                entry = frame.find(ANDROID_CONTROL_AF_MODE);
+                if (entry.count == 0) {
+                    ALOGW("%s: ZSL queue frame has no AF mode field!",
+                            __FUNCTION__);
+                    continue;
+                }
+                uint8_t afMode = entry.data.u8[0];
+
+                // Check AF state if device has focuser and focus mode isn't fixed
+                if (mHasFocuser && !isFixedFocusMode(afMode)) {
                     // Make sure the candidate frame has good focus.
                     entry = frame.find(ANDROID_CONTROL_AF_STATE);
                     if (entry.count == 0) {
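
The ZSL fix threads a "latest cleared buffer timestamp" from RingBufferConsumer::getLatestTimestamp() through clearInputRingBuffer() so that ZslProcessor3 can drop metadata results belonging to buffers that were already cleared. A hedged sketch of that filter with plain containers in place of the camera metadata and ring buffer (ZslResultQueue is a made-up name):

```cpp
#include <cstdint>
#include <cstdio>
#include <deque>

using nsecs_t = int64_t;

class ZslResultQueue {
public:
    void onResult(nsecs_t timestamp) {
        if (timestamp <= mLatestClearedTimestamp) return;  // buffer already cleared; drop it
        mFrames.push_back(timestamp);
    }
    void clear(nsecs_t latestBufferTimestamp) {
        // latestBufferTimestamp plays the role of RingBufferConsumer::getLatestTimestamp()
        mLatestClearedTimestamp = latestBufferTimestamp;
        mFrames.clear();
    }
    size_t size() const { return mFrames.size(); }
private:
    nsecs_t mLatestClearedTimestamp = 0;
    std::deque<nsecs_t> mFrames;
};

int main() {
    ZslResultQueue q;
    q.onResult(100); q.onResult(200);
    q.clear(200);        // everything up to t=200 is now invalid
    q.onResult(150);     // late result for a cleared buffer: dropped
    q.onResult(300);     // new capture: accepted
    std::printf("%zu\n", q.size());  // 1
    return 0;
}
```
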
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
index daa352b..2975f7c 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
@@ -82,6 +82,7 @@
 
   private:
     static const nsecs_t kWaitDuration = 10000000; // 10 ms
+    nsecs_t mLatestClearedBufferTimestamp;
 
     enum {
         RUNNING,
@@ -132,6 +133,8 @@
     void dumpZslQueue(int id) const;
 
     nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const;
+
+    bool isFixedFocusMode(uint8_t afMode) const;
 };
 
 
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
index 92bf81b..81330ea 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
@@ -315,20 +315,24 @@
     return OK;
 }
 
-status_t Camera3ZslStream::clearInputRingBuffer() {
+status_t Camera3ZslStream::clearInputRingBuffer(nsecs_t* latestTimestamp) {
     Mutex::Autolock l(mLock);
 
-    return clearInputRingBufferLocked();
+    return clearInputRingBufferLocked(latestTimestamp);
 }
 
-status_t Camera3ZslStream::clearInputRingBufferLocked() {
+status_t Camera3ZslStream::clearInputRingBufferLocked(nsecs_t* latestTimestamp) {
+
+    if (latestTimestamp) {
+        *latestTimestamp = mProducer->getLatestTimestamp();
+    }
     mInputBufferQueue.clear();
 
     return mProducer->clear();
 }
 
 status_t Camera3ZslStream::disconnectLocked() {
-    clearInputRingBufferLocked();
+    clearInputRingBufferLocked(NULL);
 
     return Camera3OutputStream::disconnectLocked();
 }
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h
index d89c38d..5323a49 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.h
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.h
@@ -59,8 +59,10 @@
 
     /**
      * Clears the buffers that can be used by enqueueInputBufferByTimestamp
+     * latestTimestamp will be filled with the largest timestamp of buffers
+     * being cleared, or 0 if no buffer is cleared.
      */
-    status_t clearInputRingBuffer();
+    status_t clearInputRingBuffer(nsecs_t* latestTimestamp);
 
   protected:
 
@@ -100,7 +102,7 @@
     // Disconnet the Camera3ZslStream specific bufferQueues.
     virtual status_t disconnectLocked();
 
-    status_t clearInputRingBufferLocked();
+    status_t clearInputRingBufferLocked(nsecs_t* latestTimestamp);
 
 }; // class Camera3ZslStream
 
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index e4ec5fd..f8562ec 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -41,7 +41,8 @@
         uint32_t consumerUsage,
         int bufferCount) :
     ConsumerBase(consumer),
-    mBufferCount(bufferCount)
+    mBufferCount(bufferCount),
+    mLatestTimestamp(0)
 {
     mConsumer->setConsumerUsageBits(consumerUsage);
     mConsumer->setMaxAcquiredBufferCount(bufferCount);
@@ -152,6 +153,14 @@
     return OK;
 }
 
+nsecs_t RingBufferConsumer::getLatestTimestamp() {
+    Mutex::Autolock _l(mMutex);
+    if (mBufferItemList.size() == 0) {
+        return 0;
+    }
+    return mLatestTimestamp;
+}
+
 void RingBufferConsumer::pinBufferLocked(const BufferItem& item) {
     List<RingBufferItem>::iterator it, end;
 
@@ -302,6 +311,13 @@
                 item.mTimestamp,
                 mBufferItemList.size(), mBufferCount);
 
+        if (item.mTimestamp < mLatestTimestamp) {
+            BI_LOGE("Timestamp  decreases from %" PRId64 " to %" PRId64,
+                    mLatestTimestamp, item.mTimestamp);
+        }
+
+        mLatestTimestamp = item.mTimestamp;
+
         item.mGraphicBuffer = mSlots[item.mBuf].mGraphicBuffer;
     } // end of mMutex lock
 
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h
index a03736d..da97a11 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.h
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h
@@ -159,6 +159,9 @@
     // Release all the non-pinned buffers in the ring buffer
     status_t clear();
 
+    // Return 0 if RingBuffer is empty, otherwise return timestamp of latest buffer.
+    nsecs_t getLatestTimestamp();
+
   private:
 
     // Override ConsumerBase::onFrameAvailable
@@ -180,6 +183,9 @@
     // List of acquired buffers in our ring buffer
     List<RingBufferItem>       mBufferItemList;
     const int                  mBufferCount;
+
+    // Timestamp of latest buffer
+    nsecs_t mLatestTimestamp;
 };
 
 } // namespace android