Merge "MediaBuffer: ABuffer will release MediaBuffer when it's destructed." into lmp-dev
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index f98002d..4932d40 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -449,6 +449,7 @@
     sp<EffectClient>        mIEffectClient;     // IEffectClient implementation
     sp<IMemory>             mCblkMemory;        // shared memory for deferred parameter setting
     effect_param_cblk_t*    mCblk;              // control block for deferred parameter setting
+    pid_t                   mClientPid;         // pid of the client that created the effect
 };
 
 
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 72e51f9..b5256f0 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -580,7 +580,14 @@
      * Caution: calling this method too often may be inefficient;
      * if you need a high resolution mapping between frame position and presentation time,
      * consider implementing that at application level, based on the low resolution timestamps.
-     * Returns NO_ERROR if timestamp is valid.
+     * Returns NO_ERROR    if timestamp is valid.
+     *         WOULD_BLOCK if called in STOPPED or FLUSHED state, or if called immediately after
+     *                     start/ACTIVE, when the number of frames consumed is less than the
+     *                     overall hardware latency to physical output. In WOULD_BLOCK cases,
+     *                     one might poll again, or use getPosition(), or use 0 position and
+     *                     current time for the timestamp.
+     *         INVALID_OPERATION  if called on a FastTrack, wrong state, or some other error.
+     *
      * The timestamp parameter is undefined on return, if status is not NO_ERROR.
      */
             status_t    getTimestamp(AudioTimestamp& timestamp);
@@ -747,6 +754,8 @@
                                                     // reset by stop() but continues monotonically
                                                     // after new IAudioTrack to restore mPosition,
                                                     // and could be easily widened to uint64_t
+    int64_t                 mStartUs;               // the start time after flush or stop.
+                                                    // only used for offloaded and direct tracks.
 
     audio_output_flags_t    mFlags;
         // const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD.
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 87717da..cf18a45 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -27,6 +27,7 @@
 
 #include <media/mediaplayer.h>
 #include <media/AudioSystem.h>
+#include <media/AudioTimestamp.h>
 #include <media/Metadata.h>
 
 // Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
@@ -97,6 +98,7 @@
         virtual uint32_t    latency() const = 0;
         virtual float       msecsPerFrame() const = 0;
         virtual status_t    getPosition(uint32_t *position) const = 0;
+        virtual status_t    getTimestamp(AudioTimestamp &ts) const = 0;
         virtual status_t    getFramesWritten(uint32_t *frameswritten) const = 0;
         virtual int         getSessionId() const = 0;
         virtual audio_stream_type_t getAudioStreamType() const = 0;
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index 35f6557..0d5d7e4 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -145,15 +145,19 @@
         return mStatus;
     }
 
-    mIEffect = iEffect;
     mCblkMemory = cblk;
     mCblk = static_cast<effect_param_cblk_t*>(cblk->pointer());
     int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int);
     mCblk->buffer = (uint8_t *)mCblk + bufOffset;
 
     iEffect->asBinder()->linkToDeath(mIEffectClient);
-    ALOGV("set() %p OK effect: %s id: %d status %d enabled %d", this, mDescriptor.name, mId,
-            mStatus, mEnabled);
+    mClientPid = IPCThreadState::self()->getCallingPid();
+    ALOGV("set() %p OK effect: %s id: %d status %d enabled %d pid %d", this, mDescriptor.name, mId,
+            mStatus, mEnabled, mClientPid);
+
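+    // Keep the audio session alive for the lifetime of the effect;
+    // the matching releaseAudioSessionId() is in the destructor.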
+    if (mSessionId > AUDIO_SESSION_OUTPUT_MIX) {
+        AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
+    }
 
     return mStatus;
 }
@@ -164,6 +168,9 @@
     ALOGV("Destructor %p", this);
 
     if (mStatus == NO_ERROR || mStatus == ALREADY_EXISTS) {
+        if (mSessionId > AUDIO_SESSION_OUTPUT_MIX) {
+            AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
+        }
         if (mIEffect != NULL) {
             mIEffect->disconnect();
             mIEffect->asBinder()->unlinkToDeath(mIEffectClient);
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index ff7da83..ea7b279 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -37,6 +37,19 @@
 namespace android {
 // ---------------------------------------------------------------------------
 
+static int64_t convertTimespecToUs(const struct timespec &tv)
+{
+    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
+}
+
+// current monotonic time in microseconds.
+static int64_t getNowUs()
+{
+    struct timespec tv;
+    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
+    return convertTimespecToUs(tv);
+}
+
 // static
 status_t AudioTrack::getMinFrameCount(
         size_t* frameCount,
@@ -420,6 +433,7 @@
     mServer = 0;
     mPosition = 0;
     mReleased = 0;
+    mStartUs = 0;
     AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
     mSequence = 1;
     mObservedSequence = mSequence;
@@ -451,6 +465,12 @@
         // reset current position as seen by client to 0
         mPosition = 0;
         mReleased = 0;
+        // For offloaded tracks, we don't know if the hardware counters are really zero here,
+        // since the flush is asynchronous and stop may not fully drain.
+        // We save the time when the track is started to later verify whether
+        // the counters are realistic (i.e. start from zero after this time).
+        mStartUs = getNowUs();
+
         // force refresh of remaining frames by processAudioBuffer() as last
         // write before stop could be partial.
         mRefreshRemaining = true;
@@ -587,9 +607,18 @@
 
     if (isOffloaded_l()) {
         if (mOutput != AUDIO_IO_HANDLE_NONE) {
+            // An offload output can be re-used between two audio tracks having
+            // the same configuration. A timestamp query for a paused track
+            // while the other is running would return an incorrect time.
+            // To fix this, cache the playback position on a pause() and return
+            // this time when requested until the track is resumed.
+
+            // OffloadThread sends HAL pause in its threadLoop. Time saved
+            // here can be slightly off.
+
+            // TODO: check return code for getRenderPosition.
+
             uint32_t halFrames;
-            // OffloadThread sends HAL pause in its threadLoop.. time saved
-            // here can be slightly off
             AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
             ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
         }
@@ -825,6 +854,8 @@
             uint32_t halFrames;
             AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
         }
+        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
+        // due to hardware latency. We leave this behavior for now.
         *position = dspFrames;
     } else {
         // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
@@ -1881,13 +1912,70 @@
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         return INVALID_OPERATION;
     }
-    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
-        return INVALID_OPERATION;
+
+    switch (mState) {
+    case STATE_ACTIVE:
+    case STATE_PAUSED:
+        break; // handle below
+    case STATE_FLUSHED:
+    case STATE_STOPPED:
+        return WOULD_BLOCK;
+    case STATE_STOPPING:
+    case STATE_PAUSED_STOPPING:
+        if (!isOffloaded_l()) {
+            return INVALID_OPERATION;
+        }
+        break; // offloaded tracks handled below
+    default:
+        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
+        break;
     }
+
     // The presented frame count must always lag behind the consumed frame count.
     // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
     status_t status = mAudioTrack->getTimestamp(timestamp);
-    if (status == NO_ERROR) {
+    if (status != NO_ERROR) {
+        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
+        return status;
+    }
+    if (isOffloadedOrDirect_l()) {
+        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
+            // use cached paused position in case another offloaded track is running.
+            timestamp.mPosition = mPausedPosition;
+            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
+            return NO_ERROR;
+        }
+
+        // Check whether a pending flush or stop has completed, as those commands may
+        // be asynchronous or return near finish.
+        if (mStartUs != 0 && mSampleRate != 0) {
+            static const int kTimeJitterUs = 100000; // 100 ms
+            static const int k1SecUs = 1000000;
+
+            const int64_t timeNow = getNowUs();
+
+            if (timeNow < mStartUs + k1SecUs) { // within first second of starting
+                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
+                if (timestampTimeUs < mStartUs) {
+                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
+                }
+                const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
+                const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate;
+
+                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
+                    // Verify that the counter can't count faster than the sample rate
+                    // since the start time.  If greater, then that means we have failed
+                    // to completely flush or stop the previous playing track.
+                    ALOGW("incomplete flush or stop:"
+                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
+                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
+                            timestamp.mPosition);
+                    return WOULD_BLOCK;
+                }
+            }
+            mStartUs = 0; // no need to check again; the start timestamp has either expired or is no longer needed.
+        }
+    } else {
         // Update the mapping between local consumed (mPosition) and server consumed (mServer)
         (void) updateAndGetPosition_l();
         // Server consumed (mServer) and presented both use the same server time base,
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index adc066d..2cf5710 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -22,6 +22,7 @@
     StagefrightPlayer.cpp       \
     StagefrightRecorder.cpp     \
     TestPlayerStub.cpp          \
+    VideoFrameScheduler.cpp     \
 
 LOCAL_SHARED_LIBRARIES :=       \
     libbinder                   \
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index c8cb7ed..8eb1269 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -43,6 +43,7 @@
 #include <utils/Errors.h>  // for status_t
 #include <utils/String8.h>
 #include <utils/SystemClock.h>
+#include <utils/Timers.h>
 #include <utils/Vector.h>
 
 #include <media/IMediaHTTPService.h>
@@ -1496,6 +1497,12 @@
     return mTrack->getPosition(position);
 }
 
+status_t MediaPlayerService::AudioOutput::getTimestamp(AudioTimestamp &ts) const
+{
+    if (mTrack == 0) return NO_INIT;
+    return mTrack->getTimestamp(ts);
+}
+
 status_t MediaPlayerService::AudioOutput::getFramesWritten(uint32_t *frameswritten) const
 {
     if (mTrack == 0) return NO_INIT;
@@ -1971,6 +1978,15 @@
     return NO_ERROR;
 }
 
+status_t MediaPlayerService::AudioCache::getTimestamp(AudioTimestamp &ts) const
+{
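+    // The cache decodes into memory, so report all cached frames as presented "now".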
+    ts.mPosition = mSize / mFrameSize;
+    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+    ts.mTime.tv_sec = now / 1000000000LL;
+    ts.mTime.tv_nsec = now - (1000000000LL * ts.mTime.tv_sec);
+    return NO_ERROR;
+}
+
 status_t MediaPlayerService::AudioCache::getFramesWritten(uint32_t *written) const
 {
     if (written == 0) return BAD_VALUE;
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 4fe7075..3b96e88 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -85,6 +85,7 @@
         virtual uint32_t        latency() const;
         virtual float           msecsPerFrame() const;
         virtual status_t        getPosition(uint32_t *position) const;
+        virtual status_t        getTimestamp(AudioTimestamp &ts) const;
         virtual status_t        getFramesWritten(uint32_t *frameswritten) const;
         virtual int             getSessionId() const;
         virtual uint32_t        getSampleRate() const;
@@ -198,6 +199,7 @@
         virtual uint32_t        latency() const;
         virtual float           msecsPerFrame() const;
         virtual status_t        getPosition(uint32_t *position) const;
+        virtual status_t        getTimestamp(AudioTimestamp &ts) const;
         virtual status_t        getFramesWritten(uint32_t *frameswritten) const;
         virtual int             getSessionId() const;
         virtual uint32_t        getSampleRate() const;
diff --git a/media/libmediaplayerservice/VideoFrameScheduler.cpp b/media/libmediaplayerservice/VideoFrameScheduler.cpp
new file mode 100644
index 0000000..4251c4e
--- /dev/null
+++ b/media/libmediaplayerservice/VideoFrameScheduler.cpp
@@ -0,0 +1,499 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoFrameScheduler"
+#include <utils/Log.h>
+#define ATRACE_TAG ATRACE_TAG_VIDEO
+#include <utils/Trace.h>
+
+#include <sys/time.h>
+
+#include <binder/IServiceManager.h>
+#include <gui/ISurfaceComposer.h>
+#include <ui/DisplayStatInfo.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+
+#include "VideoFrameScheduler.h"
+
+namespace android {
+
+static const nsecs_t kNanosIn1s = 1000000000;
+
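+// integer division rounded to the nearest value (halves away from zero), correct for mixed signs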
+template<class T>
+inline static const T divRound(const T &nom, const T &den) {
+    if ((nom >= 0) ^ (den >= 0)) {
+        return (nom - den / 2) / den;
+    } else {
+        return (nom + den / 2) / den;
+    }
+}
+
+template<class T>
+inline static T abs(const T &a) {
+    return a < 0 ? -a : a;
+}
+
+template<class T>
+inline static const T &min(const T &a, const T &b) {
+    return a < b ? a : b;
+}
+
+template<class T>
+inline static const T &max(const T &a, const T &b) {
+    return a > b ? a : b;
+}
+
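+// distance of val from the nearest multiple of period, in [0, period / 2]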
+template<class T>
+inline static T periodicError(const T &val, const T &period) {
+    T err = abs(val) % period;
+    return (err < (period / 2)) ? err : (period - err);
+}
+
+template<class T>
+static int compare(const T *lhs, const T *rhs) {
+    if (*lhs < *rhs) {
+        return -1;
+    } else if (*lhs > *rhs) {
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+/* ======================================================================= */
+/*                                   PLL                                   */
+/* ======================================================================= */
+
+static const size_t kMinSamplesToStartPrime = 3;
+static const size_t kMinSamplesToStopPrime = VideoFrameScheduler::kHistorySize;
+static const size_t kMinSamplesToEstimatePeriod = 3;
+static const size_t kMaxSamplesToEstimatePeriod = VideoFrameScheduler::kHistorySize;
+
+static const size_t kPrecision = 12;
+static const size_t kErrorThreshold = (1 << (kPrecision * 2)) / 10;
+static const int64_t kMultiplesThresholdDiv = 4;            // 25%
+static const int64_t kReFitThresholdDiv = 100;              // 1%
+static const nsecs_t kMaxAllowedFrameSkip = kNanosIn1s;     // 1 sec
+static const nsecs_t kMinPeriod = kNanosIn1s / 120;         // 120Hz
+static const nsecs_t kRefitRefreshPeriod = 10 * kNanosIn1s; // 10 sec
+
+VideoFrameScheduler::PLL::PLL()
+    : mPeriod(-1),
+      mPhase(0),
+      mPrimed(false),
+      mSamplesUsedForPriming(0),
+      mLastTime(-1),
+      mRefitAt(0),
+      mNumSamples(0) {
+}
+
+void VideoFrameScheduler::PLL::reset(float fps) {
+    //test();
+
+    mSamplesUsedForPriming = 0;
+    mLastTime = -1;
+
+    // set up or reset video PLL
+    if (fps <= 0.f) {
+        mPeriod = -1;
+        mPrimed = false;
+    } else {
+        ALOGV("reset at %.1f fps", fps);
+        mPeriod = (nsecs_t)(1e9 / fps + 0.5);
+        mPrimed = true;
+    }
+
+    restart();
+}
+
+// reset PLL but keep previous period estimate
+void VideoFrameScheduler::PLL::restart() {
+    mNumSamples = 0;
+    mPhase = -1;
+}
+
+#if 0
+
+void VideoFrameScheduler::PLL::test() {
+    nsecs_t period = kNanosIn1s / 60;
+    mTimes[0] = 0;
+    mTimes[1] = period;
+    mTimes[2] = period * 3;
+    mTimes[3] = period * 4;
+    mTimes[4] = period * 7;
+    mTimes[5] = period * 8;
+    mTimes[6] = period * 10;
+    mTimes[7] = period * 12;
+    mNumSamples = 8;
+    int64_t a, b, err;
+    fit(0, period * 12 / 7, 8, &a, &b, &err);
+    // a = 0.8(5)+
+    // b = -0.14097(2)+
+    // err = 0.2750578(703)+
+    ALOGD("a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)",
+            (long long)a, (a / (float)(1 << kPrecision)),
+            (long long)b, (b / (float)(1 << kPrecision)),
+            (long long)err, (err / (float)(1 << (kPrecision * 2))));
+}
+
+#endif
+
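+// Ordinary least-squares fit of the sample times to a line y = a*x + b, where x counts
+// elapsed periods and y is the offset from 'phase' in fixed-point periods (scaled by
+// 1 << kPrecision); *err receives the residual in squared fixed-point units.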
+void VideoFrameScheduler::PLL::fit(
+        nsecs_t phase, nsecs_t period, size_t numSamplesToUse,
+        int64_t *a, int64_t *b, int64_t *err) {
+    if (numSamplesToUse > mNumSamples) {
+        numSamplesToUse = mNumSamples;
+    }
+
+    int64_t sumX = 0;
+    int64_t sumXX = 0;
+    int64_t sumXY = 0;
+    int64_t sumYY = 0;
+    int64_t sumY = 0;
+
+    int64_t x = 0; // x usually is in [0..numSamplesToUse)
+    nsecs_t lastTime;
+    for (size_t i = 0; i < numSamplesToUse; i++) {
+        size_t ix = (mNumSamples - numSamplesToUse + i) % kHistorySize;
+        nsecs_t time = mTimes[ix];
+        if (i > 0) {
+            x += divRound(time - lastTime, period);
+        }
+        // y is usually in [-numSamplesToUse..numSamplesToUse+kRefitRefreshPeriod/kMinPeriod) << kPrecision
+        //   ideally in [0..numSamplesToUse), but shifted by -numSamplesToUse during
+        //   priming, and possibly shifted by up to kRefitRefreshPeriod/kMinPeriod
+        //   while we are not refitting.
+        int64_t y = divRound(time - phase, period >> kPrecision);
+        sumX += x;
+        sumY += y;
+        sumXX += x * x;
+        sumXY += x * y;
+        sumYY += y * y;
+        lastTime = time;
+    }
+
+    int64_t div   = numSamplesToUse * sumXX - sumX * sumX;
+    int64_t a_nom = numSamplesToUse * sumXY - sumX * sumY;
+    int64_t b_nom = sumXX * sumY            - sumX * sumXY;
+    *a = divRound(a_nom, div);
+    *b = divRound(b_nom, div);
+    // don't use a and b directly as the rounding error is significant
+    *err = sumYY - divRound(a_nom * sumXY + b_nom * sumY, div);
+    ALOGV("fitting[%zu] a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)",
+            numSamplesToUse,
+            (long long)*a,   (*a / (float)(1 << kPrecision)),
+            (long long)*b,   (*b / (float)(1 << kPrecision)),
+            (long long)*err, (*err / (float)(1 << (kPrecision * 2))));
+}
+
+void VideoFrameScheduler::PLL::prime(size_t numSamplesToUse) {
+    if (numSamplesToUse > mNumSamples) {
+        numSamplesToUse = mNumSamples;
+    }
+    CHECK(numSamplesToUse >= 3);  // must have at least 3 samples
+
+    // estimate video framerate from deltas between timestamps, and
+    // 2nd order deltas
+    Vector<nsecs_t> deltas;
+    nsecs_t lastTime, firstTime;
+    for (size_t i = 0; i < numSamplesToUse; ++i) {
+        size_t index = (mNumSamples - numSamplesToUse + i) % kHistorySize;
+        nsecs_t time = mTimes[index];
+        if (i > 0) {
+            if (time - lastTime > kMinPeriod) {
+                //ALOGV("delta: %lld", (long long)(time - lastTime));
+                deltas.push(time - lastTime);
+            }
+        } else {
+            firstTime = time;
+        }
+        lastTime = time;
+    }
+    deltas.sort(compare<nsecs_t>);
+    size_t numDeltas = deltas.size();
+    if (numDeltas > 1) {
+        nsecs_t deltaMinLimit = min(deltas[0] / kMultiplesThresholdDiv, kMinPeriod);
+        nsecs_t deltaMaxLimit = deltas[numDeltas / 2] * kMultiplesThresholdDiv;
+        for (size_t i = numDeltas / 2 + 1; i < numDeltas; ++i) {
+            if (deltas[i] > deltaMaxLimit) {
+                deltas.resize(i);
+                numDeltas = i;
+                break;
+            }
+        }
+        for (size_t i = 1; i < numDeltas; ++i) {
+            nsecs_t delta2nd = deltas[i] - deltas[i - 1];
+            if (delta2nd >= deltaMinLimit) {
+                //ALOGV("delta2: %lld", (long long)(delta2nd));
+                deltas.push(delta2nd);
+            }
+        }
+    }
+
+    // use the one that yields the best match
+    int64_t bestScore;
+    for (size_t i = 0; i < deltas.size(); ++i) {
+        nsecs_t delta = deltas[i];
+        int64_t score = 0;
+#if 1
+        // simplest score: number of deltas that are near multiples
+        size_t matches = 0;
+        for (size_t j = 0; j < deltas.size(); ++j) {
+            nsecs_t err = periodicError(deltas[j], delta);
+            if (err < delta / kMultiplesThresholdDiv) {
+                ++matches;
+            }
+        }
+        score = matches;
+#if 0
+        // could be weighed by the (1 - normalized error)
+        if (numSamplesToUse >= kMinSamplesToEstimatePeriod) {
+            int64_t a, b, err;
+            fit(firstTime, delta, numSamplesToUse, &a, &b, &err);
+            err = (1 << (2 * kPrecision)) - err;
+            score *= max(0, err);
+        }
+#endif
+#else
+        // or use the error as a negative score
+        if (numSamplesToUse >= kMinSamplesToEstimatePeriod) {
+            int64_t a, b, err;
+            fit(firstTime, delta, numSamplesToUse, &a, &b, &err);
+            score = -delta * err;
+        }
+#endif
+        if (i == 0 || score > bestScore) {
+            bestScore = score;
+            mPeriod = delta;
+            mPhase = firstTime;
+        }
+    }
+    ALOGV("priming[%zu] phase:%lld period:%lld", numSamplesToUse, mPhase, mPeriod);
+}
+
+nsecs_t VideoFrameScheduler::PLL::addSample(nsecs_t time) {
+    if (mLastTime >= 0
+            // if time goes backward, or we skipped rendering
+            && (time > mLastTime + kMaxAllowedFrameSkip || time < mLastTime)) {
+        restart();
+    }
+
+    mLastTime = time;
+    mTimes[mNumSamples % kHistorySize] = time;
+    ++mNumSamples;
+
+    bool doFit = time > mRefitAt;
+    if ((mPeriod <= 0 || !mPrimed) && mNumSamples >= kMinSamplesToStartPrime) {
+        prime(kMinSamplesToStopPrime);
+        ++mSamplesUsedForPriming;
+        doFit = true;
+    }
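+    // With a period estimate in hand, initialize the phase on first use, then refit the
+    // line whenever the periodic error exceeds the threshold or the refit deadline passes.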
+    if (mPeriod > 0 && mNumSamples >= kMinSamplesToEstimatePeriod) {
+        if (mPhase < 0) {
+            // initialize phase to the current render time
+            mPhase = time;
+            doFit = true;
+        } else if (!doFit) {
+            int64_t err = periodicError(time - mPhase, mPeriod);
+            doFit = err > mPeriod / kReFitThresholdDiv;
+        }
+
+        if (doFit) {
+            int64_t a, b, err;
+            mRefitAt = time + kRefitRefreshPeriod;
+            fit(mPhase, mPeriod, kMaxSamplesToEstimatePeriod, &a, &b, &err);
+            mPhase += (mPeriod * b) >> kPrecision;
+            mPeriod = (mPeriod * a) >> kPrecision;
+            ALOGV("new phase:%lld period:%lld", (long long)mPhase, (long long)mPeriod);
+
+            if (err < kErrorThreshold) {
+                if (!mPrimed && mSamplesUsedForPriming >= kMinSamplesToStopPrime) {
+                    mPrimed = true;
+                }
+            } else {
+                mPrimed = false;
+                mSamplesUsedForPriming = 0;
+            }
+        }
+    }
+    return mPeriod;
+}
+
+/* ======================================================================= */
+/*                             Frame Scheduler                             */
+/* ======================================================================= */
+
+static const nsecs_t kDefaultVsyncPeriod = kNanosIn1s / 60;  // 60Hz
+static const nsecs_t kVsyncRefreshPeriod = kNanosIn1s;       // 1 sec
+
+VideoFrameScheduler::VideoFrameScheduler()
+    : mVsyncTime(0),
+      mVsyncPeriod(0),
+      mVsyncRefreshAt(0),
+      mLastVsyncTime(-1),
+      mTimeCorrection(0) {
+}
+
+void VideoFrameScheduler::updateVsync() {
+    mVsyncRefreshAt = systemTime(SYSTEM_TIME_MONOTONIC) + kVsyncRefreshPeriod;
+    mVsyncPeriod = 0;
+    mVsyncTime = 0;
+
+    // TODO: schedule frames for the destination surface
+    // For now, surface flinger only schedules frames on the primary display
+    if (mComposer == NULL) {
+        String16 name("SurfaceFlinger");
+        sp<IServiceManager> sm = defaultServiceManager();
+        mComposer = interface_cast<ISurfaceComposer>(sm->checkService(name));
+    }
+    if (mComposer != NULL) {
+        DisplayStatInfo stats;
+        status_t res = mComposer->getDisplayStats(NULL /* display */, &stats);
+        if (res == OK) {
+            ALOGV("vsync time:%lld period:%lld",
+                    (long long)stats.vsyncTime, (long long)stats.vsyncPeriod);
+            mVsyncTime = stats.vsyncTime;
+            mVsyncPeriod = stats.vsyncPeriod;
+        } else {
+            ALOGW("getDisplayStats returned %d", res);
+        }
+    } else {
+        ALOGW("could not get surface mComposer service");
+    }
+}
+
+void VideoFrameScheduler::init(float videoFps) {
+    updateVsync();
+
+    mLastVsyncTime = -1;
+    mTimeCorrection = 0;
+
+    mPll.reset(videoFps);
+}
+
+void VideoFrameScheduler::restart() {
+    mLastVsyncTime = -1;
+    mTimeCorrection = 0;
+
+    mPll.restart();
+}
+
+nsecs_t VideoFrameScheduler::getVsyncPeriod() {
+    if (mVsyncPeriod > 0) {
+        return mVsyncPeriod;
+    }
+    return kDefaultVsyncPeriod;
+}
+
+nsecs_t VideoFrameScheduler::schedule(nsecs_t renderTime) {
+    nsecs_t origRenderTime = renderTime;
+
+    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+    if (now >= mVsyncRefreshAt) {
+        updateVsync();
+    }
+
+    // without VSYNC info, there is nothing to do
+    if (mVsyncPeriod == 0) {
+        ALOGV("no vsync: render=%lld", (long long)renderTime);
+        return renderTime;
+    }
+
+    // ensure vsync time is well before (corrected) render time
+    if (mVsyncTime > renderTime - 4 * mVsyncPeriod) {
+        mVsyncTime -=
+            ((mVsyncTime - renderTime) / mVsyncPeriod + 5) * mVsyncPeriod;
+    }
+
+    // Video presentation takes place at the VSYNC _after_ renderTime.  Adjust renderTime
+    // so this effectively becomes a rounding operation (to the _closest_ VSYNC.)
+    renderTime -= mVsyncPeriod / 2;
+
+    const nsecs_t videoPeriod = mPll.addSample(origRenderTime);
+    if (videoPeriod > 0) {
+        // Smooth out rendering
+        size_t N = 12;
+        nsecs_t fiveSixthDev =
+            abs(((videoPeriod * 5 + mVsyncPeriod) % (mVsyncPeriod * 6)) - mVsyncPeriod)
+                    / (mVsyncPeriod / 100);
+        // use 20 samples if we are doing 5:6 ratio +- 1% (e.g. playing 50Hz on 60Hz)
+        if (fiveSixthDev < 12) {  /* 12% / 6 = 2% */
+            N = 20;
+        }
+
+        nsecs_t offset = 0;
+        nsecs_t edgeRemainder = 0;
+        for (size_t i = 1; i <= N; i++) {
+            offset +=
+                (renderTime + mTimeCorrection + videoPeriod * i - mVsyncTime) % mVsyncPeriod;
+            edgeRemainder += (videoPeriod * i) % mVsyncPeriod;
+        }
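+        // Adjust the running correction so the average phase of the next N frames
+        // lands in the middle of the VSYNC interval.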
+        mTimeCorrection += mVsyncPeriod / 2 - offset / N;
+        renderTime += mTimeCorrection;
+        nsecs_t correctionLimit = mVsyncPeriod * 3 / 5;
+        edgeRemainder = abs(edgeRemainder / N - mVsyncPeriod / 2);
+        if (edgeRemainder <= mVsyncPeriod / 3) {
+            correctionLimit /= 2;
+        }
+
+        // estimate how many VSYNCs a frame will spend on the display
+        nsecs_t nextVsyncTime =
+            renderTime + mVsyncPeriod - ((renderTime - mVsyncTime) % mVsyncPeriod);
+        if (mLastVsyncTime >= 0) {
+            size_t minVsyncsPerFrame = videoPeriod / mVsyncPeriod;
+            size_t vsyncsForLastFrame = divRound(nextVsyncTime - mLastVsyncTime, mVsyncPeriod);
+            bool vsyncsPerFrameAreNearlyConstant =
+                periodicError(videoPeriod, mVsyncPeriod) / (mVsyncPeriod / 20) == 0;
+
+            if (mTimeCorrection > correctionLimit &&
+                    (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame > minVsyncsPerFrame)) {
+                // remove a VSYNC
+                mTimeCorrection -= mVsyncPeriod / 2;
+                renderTime -= mVsyncPeriod / 2;
+                nextVsyncTime -= mVsyncPeriod;
+                --vsyncsForLastFrame;
+            } else if (mTimeCorrection < -correctionLimit &&
+                    (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame == minVsyncsPerFrame)) {
+                // add a VSYNC
+                mTimeCorrection += mVsyncPeriod / 2;
+                renderTime += mVsyncPeriod / 2;
+                nextVsyncTime += mVsyncPeriod;
+                ++vsyncsForLastFrame;
+            }
+            ATRACE_INT("FRAME_VSYNCS", vsyncsForLastFrame);
+        }
+        mLastVsyncTime = nextVsyncTime;
+    }
+
+    // align renderTime to the center between VSYNC edges
+    renderTime -= (renderTime - mVsyncTime) % mVsyncPeriod;
+    renderTime += mVsyncPeriod / 2;
+    ALOGV("adjusting render: %lld => %lld", (long long)origRenderTime, (long long)renderTime);
+    ATRACE_INT("FRAME_FLIP_IN(ms)", (renderTime - now) / 1000000);
+    return renderTime;
+}
+
+void VideoFrameScheduler::release() {
+    mComposer.clear();
+}
+
+VideoFrameScheduler::~VideoFrameScheduler() {
+    release();
+}
+
+} // namespace android
+
diff --git a/media/libmediaplayerservice/VideoFrameScheduler.h b/media/libmediaplayerservice/VideoFrameScheduler.h
new file mode 100644
index 0000000..19f0787
--- /dev/null
+++ b/media/libmediaplayerservice/VideoFrameScheduler.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2014, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VIDEO_FRAME_SCHEDULER_H_
+#define VIDEO_FRAME_SCHEDULER_H_
+
+#include <utils/RefBase.h>
+#include <utils/Timers.h>
+
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct ISurfaceComposer;
+
+struct VideoFrameScheduler : public RefBase {
+    VideoFrameScheduler();
+
+    // (re)initialize scheduler
+    void init(float videoFps = -1);
+    // use in case of video render-time discontinuity, e.g. seek
+    void restart();
+    // get adjusted nanotime for a video frame render at renderTime
+    nsecs_t schedule(nsecs_t renderTime);
+
+    // returns the vsync period for the main display
+    nsecs_t getVsyncPeriod();
+
+    void release();
+
+    static const size_t kHistorySize = 8;
+
+protected:
+    virtual ~VideoFrameScheduler();
+
+private:
+    struct PLL {
+        PLL();
+
+        // reset the PLL; an fps > 0 seeds the period estimate, fps <= 0 discards it
+        void reset(float fps = -1);
+        // keep current estimate, but restart phase
+        void restart();
+        // returns period
+        nsecs_t addSample(nsecs_t time);
+
+    private:
+        nsecs_t mPeriod;
+        nsecs_t mPhase;
+
+        bool    mPrimed;        // have an estimate for the period
+        size_t  mSamplesUsedForPriming;
+
+        nsecs_t mLastTime;      // last input time
+        nsecs_t mRefitAt;       // next input time to fit at
+
+        size_t  mNumSamples;    // can go past kHistorySize
+        nsecs_t mTimes[kHistorySize];
+
+        void test();
+        void fit(nsecs_t phase, nsecs_t period, size_t numSamples,
+                int64_t *a, int64_t *b, int64_t *err);
+        void prime(size_t numSamples);
+    };
+
+    void updateVsync();
+
+    nsecs_t mVsyncTime;        // vsync timing from display
+    nsecs_t mVsyncPeriod;
+    nsecs_t mVsyncRefreshAt;   // next time to refresh timing info
+
+    nsecs_t mLastVsyncTime;    // estimated vsync time for last frame
+    nsecs_t mTimeCorrection;   // running adjustment
+
+    PLL mPll;                  // PLL for video frame rate based on render time
+
+    sp<ISurfaceComposer> mComposer;
+
+    DISALLOW_EVIL_CONSTRUCTORS(VideoFrameScheduler);
+};
+
+}  // namespace android
+
+#endif  // VIDEO_FRAME_SCHEDULER_H_
+
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index 0dd2b61..676c0a6 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -19,6 +19,7 @@
 	$(TOP)/frameworks/av/media/libstagefright/mpeg2ts             \
 	$(TOP)/frameworks/av/media/libstagefright/rtsp                \
 	$(TOP)/frameworks/av/media/libstagefright/timedtext           \
+	$(TOP)/frameworks/av/media/libmediaplayerservice              \
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_MODULE:= libstagefright_nuplayer
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index a0870fd..bd75034 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -106,6 +106,10 @@
     return OK;
 }
 
+sp<MetaData> NuPlayer::GenericSource::getFileFormatMeta() const {
+    return mFileMeta;
+}
+
 status_t NuPlayer::GenericSource::initFromDataSource() {
     sp<MediaExtractor> extractor;
 
@@ -144,17 +148,22 @@
         checkDrmStatus(mDataSource);
     }
 
-    sp<MetaData> fileMeta = extractor->getMetaData();
-    if (fileMeta != NULL) {
+    mFileMeta = extractor->getMetaData();
+    if (mFileMeta != NULL) {
         int64_t duration;
-        if (fileMeta->findInt64(kKeyDuration, &duration)) {
+        if (mFileMeta->findInt64(kKeyDuration, &duration)) {
             mDurationUs = duration;
         }
     }
 
     int32_t totalBitrate = 0;
 
-    for (size_t i = 0; i < extractor->countTracks(); ++i) {
+    size_t numtracks = extractor->countTracks();
+    if (numtracks == 0) {
+        return UNKNOWN_ERROR;
+    }
+
+    for (size_t i = 0; i < numtracks; ++i) {
         sp<MediaSource> track = extractor->getTrack(i);
 
         sp<MetaData> meta = extractor->getTrackMetaData(i);
@@ -464,6 +473,15 @@
     mStarted = true;
 }
 
+void NuPlayer::GenericSource::disconnect() {
+    if (mDataSource != NULL) {
+        // disconnect data source
+        if (mDataSource->flags() & DataSource::kIsCachingDataSource) {
+            static_cast<NuCachedSource2 *>(mDataSource.get())->disconnect();
+        }
+    }
+}
+
 void NuPlayer::GenericSource::setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position) {
     if (mDecryptHandle != NULL) {
         mDrmManagerClient->setPlaybackStatus(mDecryptHandle, playbackStatus, position);
@@ -942,7 +960,7 @@
     ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
     sp<AMessage> msg = new AMessage(kWhatSelectTrack, id());
     msg->setInt32("trackIndex", trackIndex);
-    msg->setInt32("select", trackIndex);
+    msg->setInt32("select", select);
 
     sp<AMessage> response;
     status_t err = msg->postAndAwaitResponse(&response);
@@ -1247,6 +1265,8 @@
 
             sp<ABuffer> buffer = mediaBufferToABuffer(mbuf, trackType, actualTimeUs);
             track->mPackets->queueAccessUnit(buffer);
+            formatChange = false;
+            seeking = false;
             ++numBuffers;
         } else if (err == WOULD_BLOCK) {
             break;
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index c70c48e..24bb6af 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -55,8 +55,12 @@
     virtual void pause();
     virtual void resume();
 
+    virtual void disconnect();
+
     virtual status_t feedMoreTSData();
 
+    virtual sp<MetaData> getFileFormatMeta() const;
+
     virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
 
     virtual status_t getDuration(int64_t *durationUs);
@@ -123,6 +127,7 @@
     sp<DataSource> mDataSource;
     sp<NuCachedSource2> mCachedSource;
     sp<WVMExtractor> mWVMExtractor;
+    sp<MetaData> mFileMeta;
     DrmManagerClient *mDrmManagerClient;
     sp<DecryptHandle> mDecryptHandle;
     bool mStarted;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 9020a8d..dad480d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -310,6 +310,16 @@
 }
 
 void NuPlayer::resetAsync() {
+    if (mSource != NULL) {
+        // During a reset, the data source might already be unresponsive; we need to
+        // disconnect explicitly so that reads exit promptly.
+        // We can't queue the disconnect request to the looper, as it might be
+        // queued behind a stuck read and never get processed.
+        // Doing the disconnect outside the looper allows pending reads to exit
+        // (either successfully or with error).
+        mSource->disconnect();
+    }
+
     (new AMessage(kWhatReset, id()))->post();
 }
 
@@ -633,6 +643,13 @@
             mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
             mRendererLooper->registerHandler(mRenderer);
 
+            sp<MetaData> meta = getFileMeta();
+            int32_t rate;
+            if (meta != NULL
+                    && meta->findInt32(kKeyFrameRate, &rate) && rate > 0) {
+                mRenderer->setVideoFrameRate(rate);
+            }
+
             postScanSources();
             break;
         }
@@ -1260,8 +1277,8 @@
 
     // Aggregate smaller buffers into a larger buffer.
     // The goal is to reduce power consumption.
-    // Unfortunately this does not work with the software AAC decoder.
-    bool doBufferAggregation = (audio && mOffloadAudio);;
+    // Note this will not work if the decoder requires one frame per buffer.
+    bool doBufferAggregation = (audio && mOffloadAudio);
     bool needMoreData = false;
 
     bool dropAccessUnit;
@@ -1281,7 +1298,7 @@
             return err;
         } else if (err != OK) {
             if (err == INFO_DISCONTINUITY) {
-                if (mAggregateBuffer != NULL) {
+                if (doBufferAggregation && (mAggregateBuffer != NULL)) {
                     // We already have some data so save this for later.
                     mPendingAudioErr = err;
                     mPendingAudioAccessUnit = accessUnit;
@@ -1404,7 +1421,7 @@
             mAggregateBuffer->setRange(0, 0); // start empty
         }
 
-        if (mAggregateBuffer != NULL) {
+        if (doBufferAggregation && (mAggregateBuffer != NULL)) {
             int64_t timeUs;
             int64_t dummy;
             bool smallTimestampValid = accessUnit->meta()->findInt64("timeUs", &timeUs);
@@ -1453,7 +1470,7 @@
         mCCDecoder->decode(accessUnit);
     }
 
-    if (mAggregateBuffer != NULL) {
+    if (doBufferAggregation && (mAggregateBuffer != NULL)) {
         ALOGV("feedDecoderInputData() reply with aggregated buffer, %zu",
                 mAggregateBuffer->size());
         reply->setBuffer("buffer", mAggregateBuffer);
@@ -1720,6 +1737,10 @@
     return err;
 }
 
+sp<MetaData> NuPlayer::getFileMeta() {
+    return mSource->getFileFormatMeta();
+}
+
 void NuPlayer::schedulePollDuration() {
     sp<AMessage> msg = new AMessage(kWhatPollDuration, id());
     msg->setInt32("generation", mPollDurationGeneration);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 2e951bd..7197e5f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -67,6 +67,8 @@
     status_t getSelectedTrack(int32_t type, Parcel* reply) const;
     status_t selectTrack(size_t trackIndex, bool select);
 
+    sp<MetaData> getFileMeta();
+
     static const size_t kAggregateBufferSizeBytes;
 
 protected:
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 601cd40..cdb860c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -465,7 +465,9 @@
     size_t bufferIx;
     CHECK(msg->findSize("buffer-ix", &bufferIx));
     if (msg->findInt32("render", &render) && render) {
-        err = mCodec->renderOutputBufferAndRelease(bufferIx);
+        int64_t timestampNs;
+        CHECK(msg->findInt64("timestampNs", &timestampNs));
+        err = mCodec->renderOutputBufferAndRelease(bufferIx, timestampNs);
     } else {
         err = mCodec->releaseOutputBuffer(bufferIx);
     }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 7dd54c1..7ec9876 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -45,6 +45,7 @@
       mPlayerFlags(0),
       mAtEOS(false),
       mLooping(false),
+      mAutoLoop(false),
       mStartupSeekTimeUs(-1) {
     mLooper->setName("NuPlayerDriver Looper");
 
@@ -263,8 +264,22 @@
         case STATE_PAUSED:
         case STATE_STOPPED_AND_PREPARED:
         {
-            mPlayer->resume();
-            mPositionUs -= ALooper::GetNowUs() - mPauseStartedTimeUs;
+            if (mAtEOS) {
+                mPlayer->seekToAsync(0);
+                mAtEOS = false;
+                mPlayer->resume();
+                mPositionUs = -1;
+            } else {
+                mPlayer->resume();
+                if (mNotifyTimeRealUs != -1) {
+                    // If we get here, the pause time must have been set by setPauseStartedTimeIfNeeded().
+                    //CHECK(mPauseStartedTimeUs != -1);
+
+                    // if no seek occurs, adjust our notify time so that getCurrentPosition()
+                    // is continuous if read immediately after calling start().
+                    mNotifyTimeRealUs += ALooper::GetNowUs() - mPauseStartedTimeUs;
+                }
+            }
             break;
         }
 
@@ -371,15 +386,36 @@
     Mutex::Autolock autoLock(mLock);
 
     if (mPositionUs < 0) {
+        // mPositionUs is the media time.
+        // It is negative in these cases:
+        // (1) == -1 after reset, or very first playback, no stream notification yet.
+        // (2) == -1 start after end of stream, no stream notification yet.
+        // (3) == large negative # after ~292,471 years of continuous playback.
+
+        //CHECK_EQ(mPositionUs, -1);
         *msec = 0;
     } else if (mNotifyTimeRealUs == -1) {
+        // A seek has just occurred; no stream notification yet.
+        // mPositionUs (>= 0) is the new media position.
         *msec = mPositionUs / 1000;
     } else {
+        // mPositionUs must be valid (i.e. >= 0) by the first check above.
+        // We're either playing or have pause time set: mPauseStartedTimeUs is >= 0
+        //LOG_ALWAYS_FATAL_IF(
+        //        !isPlaying() && mPauseStartedTimeUs < 0,
+        //        "Player in non-playing mState(%d) and mPauseStartedTimeUs(%lld) < 0",
+        //        mState, (long long)mPauseStartedTimeUs);
+        ALOG_ASSERT(mNotifyTimeRealUs >= 0);
         int64_t nowUs =
                 (isPlaying() ?  ALooper::GetNowUs() : mPauseStartedTimeUs);
         *msec = (mPositionUs + nowUs - mNotifyTimeRealUs + 500ll) / 1000;
+        // It is possible for *msec to be negative if the media position is > 596 hours,
+        // but we only turn on this check when NDEBUG == 0.
+        ALOG_ASSERT(*msec >= 0);
+        ALOGV("getCurrentPosition nowUs(%lld)", (long long)nowUs);
     }
-
+    ALOGV("getCurrentPosition returning(%d) mPositionUs(%lld) mNotifyRealTimeUs(%lld)",
+            *msec, (long long)mPositionUs, (long long)mNotifyTimeRealUs);
     return OK;
 }
 
@@ -498,6 +534,7 @@
 
 void NuPlayerDriver::setAudioSink(const sp<AudioSink> &audioSink) {
     mPlayer->setAudioSink(audioSink);
+    mAudioSink = audioSink;
 }
 
 status_t NuPlayerDriver::setParameter(
@@ -627,7 +664,8 @@
         case MEDIA_PLAYBACK_COMPLETE:
         {
             if (mState != STATE_RESET_IN_PROGRESS) {
-                if (mLooping) {
+                if (mLooping || (mAutoLoop
+                        && (mAudioSink == NULL || mAudioSink->realtime()))) {
                     mPlayer->seekToAsync(0);
                     break;
                 }
@@ -693,6 +731,13 @@
         }
     }
 
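+    // Cache the container's auto-loop hint; MEDIA_PLAYBACK_COMPLETE uses it to restart playback.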
+    sp<MetaData> meta = mPlayer->getFileMeta();
+    int32_t loop;
+    if (meta != NULL
+            && meta->findInt32(kKeyAutoLoop, &loop) && loop != 0) {
+        mAutoLoop = true;
+    }
+
     mCondition.broadcast();
 }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index e81d605..f2bd431 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -114,10 +114,12 @@
 
     sp<ALooper> mLooper;
     sp<NuPlayer> mPlayer;
+    sp<AudioSink> mAudioSink;
     uint32_t mPlayerFlags;
 
     bool mAtEOS;
     bool mLooping;
+    bool mAutoLoop;
 
     int64_t mStartupSeekTimeUs;
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 067784b..a8c8818 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -26,6 +26,8 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 
+#include <VideoFrameScheduler.h>
+
 #include <inttypes.h>
 
 namespace android {
@@ -45,7 +47,7 @@
       mDrainVideoQueuePending(false),
       mAudioQueueGeneration(0),
       mVideoQueueGeneration(0),
-      mFirstAudioTimeUs(-1),
+      mFirstAnchorTimeMediaUs(-1),
       mAnchorTimeMediaUs(-1),
       mAnchorTimeRealUs(-1),
       mFlushingAudio(false),
@@ -54,12 +56,12 @@
       mHasVideo(false),
       mSyncQueues(false),
       mPaused(false),
+      mVideoSampleReceived(false),
       mVideoRenderingStarted(false),
       mVideoRenderingStartGeneration(0),
       mAudioRenderingStartGeneration(0),
       mLastPositionUpdateUs(-1ll),
-      mVideoLateByUs(0ll),
-      mVideoSampleReceived(false) {
+      mVideoLateByUs(0ll) {
 }
 
 NuPlayer::Renderer::~Renderer() {
@@ -115,6 +117,7 @@
     Mutex::Autolock autoLock(mLock);
     // CHECK(mAudioQueue.empty());
     // CHECK(mVideoQueue.empty());
+    mFirstAnchorTimeMediaUs = -1;
     mAnchorTimeMediaUs = -1;
     mAnchorTimeRealUs = -1;
     mSyncQueues = false;
@@ -136,6 +139,12 @@
     (new AMessage(kWhatResume, id()))->post();
 }
 
+void NuPlayer::Renderer::setVideoFrameRate(float fps) {
+    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, id());
+    msg->setFloat("frame-rate", fps);
+    msg->post();
+}
+
 void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
         case kWhatStopAudioSink:
@@ -236,6 +245,14 @@
             break;
         }
 
+        case kWhatSetVideoFrameRate:
+        {
+            float fps;
+            CHECK(msg->findFloat("frame-rate", &fps));
+            onSetVideoFrameRate(fps);
+            break;
+        }
+
         case kWhatAudioOffloadTearDown:
         {
             onAudioOffloadTearDown();
@@ -339,19 +356,16 @@
             int64_t mediaTimeUs;
             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
             ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
-            if (mFirstAudioTimeUs == -1) {
-                mFirstAudioTimeUs = mediaTimeUs;
+            if (mFirstAnchorTimeMediaUs == -1) {
+                mFirstAnchorTimeMediaUs = mediaTimeUs;
             }
 
-            uint32_t numFramesPlayed;
-            CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
+            int64_t nowUs = ALooper::GetNowUs();
+            mAnchorTimeMediaUs =
+                mFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs);
+            mAnchorTimeRealUs = nowUs;
 
-            // TODO: figure out how to calculate initial latency.
-            // Otherwise, the initial time is not correct till the first sample
-            // is played.
-            mAnchorTimeMediaUs = mFirstAudioTimeUs
-                    + (numFramesPlayed * mAudioSink->msecsPerFrame()) * 1000ll;
-            mAnchorTimeRealUs = ALooper::GetNowUs();
+            notifyPosition();
         }
 
         size_t copy = entry->mBuffer->size() - entry->mOffset;
@@ -374,10 +388,6 @@
         notifyIfMediaRenderingStarted();
     }
 
-    if (sizeCopied != 0) {
-        notifyPosition();
-    }
-
     if (hasEOS) {
         (new AMessage(kWhatStopAudioSink, id()))->post();
     }
@@ -413,7 +423,7 @@
             // EOS
             int64_t postEOSDelayUs = 0;
             if (mAudioSink->needsTrailingPadding()) {
-                postEOSDelayUs = getAudioPendingPlayoutUs() + 1000 * mAudioSink->latency();
+                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
             }
             notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
 
@@ -426,10 +436,15 @@
             int64_t mediaTimeUs;
             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
             ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
+            if (mFirstAnchorTimeMediaUs == -1) {
+                mFirstAnchorTimeMediaUs = mediaTimeUs;
+            }
             mAnchorTimeMediaUs = mediaTimeUs;
 
-            mAnchorTimeRealUs = ALooper::GetNowUs()
-                    + getAudioPendingPlayoutUs() + 1000 * mAudioSink->latency() / 2;
+            int64_t nowUs = ALooper::GetNowUs();
+            mAnchorTimeRealUs = nowUs + getPendingAudioPlayoutDurationUs(nowUs);
+
+            notifyPosition();
         }
 
         size_t copy = entry->mBuffer->size() - entry->mOffset;
@@ -478,17 +493,13 @@
             break;
         }
     }
-    notifyPosition();
-
     return !mAudioQueue.empty();
 }
 
-int64_t NuPlayer::Renderer::getAudioPendingPlayoutUs() {
-    uint32_t numFramesPlayed;
-    CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
-
-    uint32_t numFramesPendingPlayout = mNumFramesWritten - numFramesPlayed;
-    return numFramesPendingPlayout * mAudioSink->msecsPerFrame() * 1000;
+int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
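+    // duration of audio written to the sink minus the duration already played out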
+    int64_t writtenAudioDurationUs =
+        mNumFramesWritten * 1000LL * mAudioSink->msecsPerFrame();
+    return writtenAudioDurationUs - getPlayedOutAudioDurationUs(nowUs);
 }
 
 void NuPlayer::Renderer::postDrainVideoQueue() {
@@ -507,37 +518,48 @@
     sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, id());
     msg->setInt32("generation", mVideoQueueGeneration);
 
-    int64_t delayUs;
-
     if (entry.mBuffer == NULL) {
         // EOS doesn't carry a timestamp.
-        delayUs = 0;
-    } else if (mFlags & FLAG_REAL_TIME) {
+        msg->post();
+        mDrainVideoQueuePending = true;
+        return;
+    }
+
+    int64_t delayUs;
+    int64_t nowUs = ALooper::GetNowUs();
+    int64_t realTimeUs;
+    if (mFlags & FLAG_REAL_TIME) {
         int64_t mediaTimeUs;
         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
-
-        delayUs = mediaTimeUs - ALooper::GetNowUs();
+        realTimeUs = mediaTimeUs;
     } else {
         int64_t mediaTimeUs;
         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
 
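+        // Without audio, the first video frame anchors the media clock.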
+        if (mFirstAnchorTimeMediaUs == -1 && !mHasAudio) {
+            mFirstAnchorTimeMediaUs = mediaTimeUs;
+        }
         if (mAnchorTimeMediaUs < 0) {
-            delayUs = 0;
-
             if (!mHasAudio) {
                 mAnchorTimeMediaUs = mediaTimeUs;
-                mAnchorTimeRealUs = ALooper::GetNowUs();
+                mAnchorTimeRealUs = nowUs;
+                notifyPosition();
             }
+            realTimeUs = nowUs;
         } else {
-            int64_t realTimeUs =
+            realTimeUs =
                 (mediaTimeUs - mAnchorTimeMediaUs) + mAnchorTimeRealUs;
-
-            delayUs = realTimeUs - ALooper::GetNowUs();
         }
     }
 
+    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
+    int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
+
+    delayUs = realTimeUs - nowUs;
+
     ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
-    msg->post(delayUs);
+    // post 2 display refreshes before rendering is due
+    msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
 
     mDrainVideoQueuePending = true;
 }
@@ -558,8 +580,6 @@
         entry = NULL;
 
         mVideoLateByUs = 0ll;
-
-        notifyPosition();
         return;
     }
 
@@ -591,6 +611,7 @@
         mVideoLateByUs = 0ll;
     }
 
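+    // Attach the scheduled render time; the decoder passes it to renderOutputBufferAndRelease().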
+    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
     entry->mNotifyConsumed->setInt32("render", !tooLate);
     entry->mNotifyConsumed->post();
     mVideoQueue.erase(mVideoQueue.begin());
@@ -605,8 +626,6 @@
         }
         notifyIfMediaRenderingStarted();
     }
-
-    notifyPosition();
 }
 
 void NuPlayer::Renderer::notifyVideoRenderingStart() {
@@ -635,6 +654,10 @@
         mHasAudio = true;
     } else {
         mHasVideo = true;
+        if (mVideoScheduler == NULL) {
+            mVideoScheduler = new VideoFrameScheduler();
+            mVideoScheduler->init();
+        }
     }
 
     if (dropBufferWhileFlushing(audio, msg)) {
@@ -783,7 +806,7 @@
             prepareForMediaRenderingStart();
 
             if (offloadingAudio()) {
-                mFirstAudioTimeUs = -1;
+                mFirstAnchorTimeMediaUs = -1;
             }
         }
 
@@ -800,6 +823,10 @@
         mDrainVideoQueuePending = false;
         ++mVideoQueueGeneration;
 
+        if (mVideoScheduler != NULL) {
+            mVideoScheduler->restart();
+        }
+
         prepareForMediaRenderingStart();
     }
 
@@ -871,9 +898,11 @@
 }
 
 void NuPlayer::Renderer::notifyPosition() {
-    if (mAnchorTimeRealUs < 0 || mAnchorTimeMediaUs < 0) {
-        return;
-    }
+    // notifyPosition() must be called only after setting mAnchorTimeRealUs
+    // and mAnchorTimeMediaUs, and must not be paused as it extrapolates position.
+    //CHECK_GE(mAnchorTimeRealUs, 0);
+    //CHECK_GE(mAnchorTimeMediaUs, 0);
+    //CHECK(!mPaused || !mHasAudio);  // video-only does display in paused mode.
 
     int64_t nowUs = ALooper::GetNowUs();
 
@@ -885,6 +914,18 @@
 
     int64_t positionUs = (nowUs - mAnchorTimeRealUs) + mAnchorTimeMediaUs;
 
+    //ALOGD("notifyPosition: positionUs(%lld) nowUs(%lld) mAnchorTimeRealUs(%lld)"
+    //        " mAnchorTimeMediaUs(%lld) mFirstAnchorTimeMediaUs(%lld)",
+    //        (long long)positionUs, (long long)nowUs, (long long)mAnchorTimeRealUs,
+    //        (long long)mAnchorTimeMediaUs, (long long)mFirstAnchorTimeMediaUs);
+
+    // Due to adding the latency to mAnchorTimeRealUs in onDrainAudioQueue(),
+    // positionUs may be less than the first media time.  This is avoided
+    // here to prevent potential retrograde motion of the position bar
+    // when starting up after a seek.
+    if (positionUs < mFirstAnchorTimeMediaUs) {
+        positionUs = mFirstAnchorTimeMediaUs;
+    }
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatPosition);
     notify->setInt64("positionUs", positionUs);
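
A minimal sketch of the extrapolate-then-clamp logic in notifyPosition(); the
parameters mirror the member variables of the same names:

    #include <cstdint>

    int64_t extrapolatePositionUs(int64_t nowUs, int64_t anchorTimeRealUs,
                                  int64_t anchorTimeMediaUs,
                                  int64_t firstAnchorTimeMediaUs) {
        // Project the media position forward from the anchor pair.
        int64_t positionUs = (nowUs - anchorTimeRealUs) + anchorTimeMediaUs;
        // Clamp so the position bar cannot run backwards just after a seek.
        if (positionUs < firstAnchorTimeMediaUs) {
            positionUs = firstAnchorTimeMediaUs;
        }
        return positionUs;
    }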
@@ -937,17 +978,87 @@
     }
 }
 
-void NuPlayer::Renderer::onAudioOffloadTearDown() {
-    uint32_t numFramesPlayed;
-    CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
+void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
+    if (mVideoScheduler == NULL) {
+        mVideoScheduler = new VideoFrameScheduler();
+    }
+    mVideoScheduler->init(fps);
+}
 
+// TODO: Remove unnecessary calls to getPlayedOutAudioDurationUs()
+// as it acquires locks and may query the audio driver.
+//
+// Some calls are not needed since notifyPosition() doesn't always deliver a message.
+// Some calls could conceivably retrieve extrapolated data instead of
+// accessing getTimestamp() or getPosition() every time a data buffer with
+// a media time is received.
+//
+int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
+    uint32_t numFramesPlayed;
+    int64_t numFramesPlayedAt;
+    AudioTimestamp ts;
+    static const int64_t kStaleTimestamp100ms = 100000;
+
+    status_t res = mAudioSink->getTimestamp(ts);
+    if (res == OK) {                 // case 1: mixing audio tracks and offloaded tracks.
+        numFramesPlayed = ts.mPosition;
+        numFramesPlayedAt =
+            ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
+        const int64_t timestampAge = nowUs - numFramesPlayedAt;
+        if (timestampAge > kStaleTimestamp100ms) {
+            // This is an audio FIXME.
+            // getTimestamp returns a timestamp which may come from audio mixing threads.
+            // After pausing, the MixerThread may go idle, thus the mTime estimate may
+            // become stale. Assuming that the MixerThread runs every 20ms and the
+            // FastMixer every 5ms, the max latency should be about 25ms with an
+            // average around 12ms (to be verified). For safety we use 100ms.
+            ALOGW("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)",
+                    (long long)nowUs, (long long)numFramesPlayedAt);
+            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
+        }
+        //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
+    } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
+        numFramesPlayed = 0;
+        numFramesPlayedAt = nowUs;
+        //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
+        //        numFramesPlayed, (long long)numFramesPlayedAt);
+    } else {                         // case 3: transitory at new track or audio fast tracks.
+        res = mAudioSink->getPosition(&numFramesPlayed);
+        CHECK_EQ(res, (status_t)OK);
+        numFramesPlayedAt = nowUs;
+        numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */
+        //ALOGD("getPosition: %d %lld", numFramesPlayed, numFramesPlayedAt);
+    }
+
+    // TODO: remove the (int32_t) cast below: numFramesPlayed exceeds int32_t range after about 12.4 hours at 48 kHz.
+    //CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // can't be negative until 12.4 hrs, test
+    int64_t durationUs = (int32_t)numFramesPlayed * 1000LL * mAudioSink->msecsPerFrame()
+            + nowUs - numFramesPlayedAt;
+    if (durationUs < 0) {
+        // Occurs when numFramesPlayed is very small and one of the following holds:
+        // (1) In case 1, nowUs is computed before getTimestamp() is called, so
+        //     numFramesPlayedAt exceeds nowUs by more time than numFramesPlayed covers.
+        // (2) In case 3, after getPosition(), adding half of mAudioSink->latency()
+        //     pushes numFramesPlayedAt past nowUs by more time than numFramesPlayed covers.
+        //
+        // Both of these are transitory conditions.
+        ALOGW("getPlayedOutAudioDurationUs: negative duration %lld set to zero", (long long)durationUs);
+        durationUs = 0;
+    }
+    ALOGV("getPlayedOutAudioDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
+            (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt);
+    return durationUs;
+}
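
A rough sketch of the duration math above; usPerFrame stands in for
1000 * mAudioSink->msecsPerFrame(), and the clamp mirrors the
transitory-negative handling:

    #include <cstdint>

    int64_t playedOutDurationUs(uint32_t numFramesPlayed,
                                int64_t numFramesPlayedAtUs,
                                int64_t nowUs, double usPerFrame) {
        // Convert the frame count to time, then extrapolate forward from the
        // moment the count was sampled to "now".
        int64_t durationUs = (int64_t)(numFramesPlayed * usPerFrame)
                + (nowUs - numFramesPlayedAtUs);
        return durationUs < 0 ? 0 : durationUs;  // transitory; clamp to zero
    }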
+
+void NuPlayer::Renderer::onAudioOffloadTearDown() {
     int64_t firstAudioTimeUs;
     {
         Mutex::Autolock autoLock(mLock);
-        firstAudioTimeUs = mFirstAudioTimeUs;
+        firstAudioTimeUs = mFirstAnchorTimeMediaUs;
     }
-    int64_t currentPositionUs = firstAudioTimeUs
-            + (numFramesPlayed * mAudioSink->msecsPerFrame()) * 1000ll;
+
+    int64_t currentPositionUs =
+        firstAudioTimeUs + getPlayedOutAudioDurationUs(ALooper::GetNowUs());
 
     mAudioSink->stop();
     mAudioSink->flush();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 5c7d2d7..e28071f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -23,6 +23,7 @@
 namespace android {
 
 struct ABuffer;
+struct VideoFrameScheduler;
 
 struct NuPlayer::Renderer : public AHandler {
     enum Flags {
@@ -56,6 +57,8 @@
     void pause();
     void resume();
 
+    void setVideoFrameRate(float fps);
+
     enum {
         kWhatEOS                 = 'eos ',
         kWhatFlushComplete       = 'fluC',
@@ -82,6 +85,7 @@
         kWhatResume              = 'resm',
         kWhatStopAudioSink       = 'stpA',
         kWhatDisableOffloadAudio = 'noOA',
+        kWhatSetVideoFrameRate   = 'sVFR',
     };
 
     struct QueueEntry {
@@ -100,13 +104,14 @@
     List<QueueEntry> mAudioQueue;
     List<QueueEntry> mVideoQueue;
     uint32_t mNumFramesWritten;
+    sp<VideoFrameScheduler> mVideoScheduler;
 
     bool mDrainAudioQueuePending;
     bool mDrainVideoQueuePending;
     int32_t mAudioQueueGeneration;
     int32_t mVideoQueueGeneration;
 
-    int64_t mFirstAudioTimeUs;
+    int64_t mFirstAnchorTimeMediaUs;
     int64_t mAnchorTimeMediaUs;
     int64_t mAnchorTimeRealUs;
 
@@ -130,7 +135,8 @@
     size_t fillAudioBuffer(void *buffer, size_t size);
 
     bool onDrainAudioQueue();
-    int64_t getAudioPendingPlayoutUs();
+    int64_t getPendingAudioPlayoutDurationUs(int64_t nowUs);
+    int64_t getPlayedOutAudioDurationUs(int64_t nowUs);
     void postDrainAudioQueue_l(int64_t delayUs = 0);
 
     void onDrainVideoQueue();
@@ -146,6 +152,7 @@
     void onDisableOffloadAudio();
     void onPause();
     void onResume();
+    void onSetVideoFrameRate(float fps);
     void onAudioOffloadTearDown();
 
     void notifyEOS(bool audio, status_t finalResult, int64_t delayUs = 0);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 7ccf3b1..2f06c31 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -67,12 +67,16 @@
     virtual void pause() {}
     virtual void resume() {}
 
+    // Explicitly disconnect the underlying data source
+    virtual void disconnect() {}
+
     // Returns OK iff more data was available,
     // an error or ERROR_END_OF_STREAM if not.
     virtual status_t feedMoreTSData() = 0;
 
     virtual sp<AMessage> getFormat(bool audio);
     virtual sp<MetaData> getFormatMeta(bool /* audio */) { return NULL; }
+    virtual sp<MetaData> getFileFormatMeta() const { return NULL; }
 
     virtual status_t dequeueAccessUnit(
             bool audio, sp<ABuffer> *accessUnit) = 0;
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index c1feff8..be2a873 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -191,6 +191,7 @@
       mFinalStatus(OK),
       mLastAccessPos(0),
       mFetching(true),
+      mDisconnecting(false),
       mLastFetchTimeUs(-1),
       mNumRetriesLeft(kMaxNumRetries),
       mHighwaterThresholdBytes(kDefaultHighWaterThreshold),
@@ -244,6 +245,23 @@
     return ERROR_UNSUPPORTED;
 }
 
+void NuCachedSource2::disconnect() {
+    if (mSource->flags() & kIsHTTPBasedSource) {
+        ALOGV("disconnecting HTTPBasedSource");
+
+        {
+            Mutex::Autolock autoLock(mLock);
+            // Set mDisconnecting to true; if a fetch returns after this,
+            // the source will be marked as EOS.
+            mDisconnecting = true;
+        }
+
+        // explicitly disconnect from the source, to allow any
+        // pending reads to return more promptly
+        static_cast<HTTPBase *>(mSource.get())->disconnect();
+    }
+}
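
The pattern here is: publish the flag under the lock first, then abort the
pending read so any blocked fetch returns promptly and observes the flag. A
standalone sketch, with a hypothetical HttpSource standing in for the
HTTPBase-derived source:

    #include <mutex>

    struct HttpSource {          // hypothetical stand-in for HTTPBase
        void disconnect() {}     // aborts any in-flight read
    };

    struct CachedSourceSketch {
        std::mutex mLock;
        bool mDisconnecting = false;
        HttpSource *mSource = nullptr;

        void disconnect() {
            {
                std::lock_guard<std::mutex> guard(mLock);
                // Any fetch completing after this point is mapped to EOS.
                mDisconnecting = true;
            }
            if (mSource != nullptr) {
                mSource->disconnect();
            }
        }
    };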
+
 status_t NuCachedSource2::setCacheStatCollectFreq(int32_t freqMs) {
     if (mSource->flags() & kIsHTTPBasedSource) {
         HTTPBase *source = static_cast<HTTPBase *>(mSource.get());
@@ -327,7 +345,14 @@
 
     Mutex::Autolock autoLock(mLock);
 
-    if (n < 0) {
+    if (n == 0 || mDisconnecting) {
+        ALOGI("ERROR_END_OF_STREAM");
+
+        mNumRetriesLeft = 0;
+        mFinalStatus = ERROR_END_OF_STREAM;
+
+        mCache->releasePage(page);
+    } else if (n < 0) {
         mFinalStatus = n;
         if (n == ERROR_UNSUPPORTED || n == -EPIPE) {
             // These are errors that are not likely to go away even if we
@@ -337,13 +362,6 @@
 
         ALOGE("source returned error %zd, %d retries left", n, mNumRetriesLeft);
         mCache->releasePage(page);
-    } else if (n == 0) {
-        ALOGI("ERROR_END_OF_STREAM");
-
-        mNumRetriesLeft = 0;
-        mFinalStatus = ERROR_END_OF_STREAM;
-
-        mCache->releasePage(page);
     } else {
         if (mFinalStatus != OK) {
             ALOGI("retrying a previously failed read succeeded.");
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 4569c1c..fb27dca 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -137,6 +137,7 @@
     mOutputDelayRingBuffer = new short[mOutputDelayRingBufferSize];
     mOutputDelayRingBufferWritePos = 0;
     mOutputDelayRingBufferReadPos = 0;
+    mOutputDelayRingBufferFilled = 0;
 
     if (mAACDecoder == NULL) {
         ALOGE("AAC decoder is null. TODO: Can not call aacDecoder_SetParam in the following code");
@@ -408,6 +409,13 @@
 }
 
 bool SoftAAC2::outputDelayRingBufferPutSamples(INT_PCM *samples, int32_t numSamples) {
+    if (numSamples == 0) {
+        return true;
+    }
+    if (outputDelayRingBufferSpaceLeft() < numSamples) {
+        ALOGE("RING BUFFER WOULD OVERFLOW");
+        return false;
+    }
     if (mOutputDelayRingBufferWritePos + numSamples <= mOutputDelayRingBufferSize
             && (mOutputDelayRingBufferReadPos <= mOutputDelayRingBufferWritePos
                     || mOutputDelayRingBufferReadPos > mOutputDelayRingBufferWritePos + numSamples)) {
@@ -419,10 +427,6 @@
         if (mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferSize) {
             mOutputDelayRingBufferWritePos -= mOutputDelayRingBufferSize;
         }
-        if (mOutputDelayRingBufferWritePos == mOutputDelayRingBufferReadPos) {
-            ALOGE("RING BUFFER OVERFLOW");
-            return false;
-        }
     } else {
         ALOGV("slow SoftAAC2::outputDelayRingBufferPutSamples()");
 
@@ -432,16 +436,19 @@
             if (mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferSize) {
                 mOutputDelayRingBufferWritePos -= mOutputDelayRingBufferSize;
             }
-            if (mOutputDelayRingBufferWritePos == mOutputDelayRingBufferReadPos) {
-                ALOGE("RING BUFFER OVERFLOW");
-                return false;
-            }
         }
     }
+    mOutputDelayRingBufferFilled += numSamples;
     return true;
 }
 
 int32_t SoftAAC2::outputDelayRingBufferGetSamples(INT_PCM *samples, int32_t numSamples) {
+    if (numSamples > mOutputDelayRingBufferFilled) {
+        ALOGE("RING BUFFER WOULD UNDERRUN");
+        return -1;
+    }
+
     if (mOutputDelayRingBufferReadPos + numSamples <= mOutputDelayRingBufferSize
             && (mOutputDelayRingBufferWritePos < mOutputDelayRingBufferReadPos
                     || mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferReadPos + numSamples)) {
@@ -460,10 +467,6 @@
         ALOGV("slow SoftAAC2::outputDelayRingBufferGetSamples()");
 
         for (int32_t i = 0; i < numSamples; i++) {
-            if (mOutputDelayRingBufferWritePos == mOutputDelayRingBufferReadPos) {
-                ALOGE("RING BUFFER UNDERRUN");
-                return -1;
-            }
             if (samples != 0) {
                 samples[i] = mOutputDelayRingBuffer[mOutputDelayRingBufferReadPos];
             }
@@ -473,22 +476,15 @@
             }
         }
     }
+    mOutputDelayRingBufferFilled -= numSamples;
     return numSamples;
 }
 
 int32_t SoftAAC2::outputDelayRingBufferSamplesAvailable() {
-    int32_t available = mOutputDelayRingBufferWritePos - mOutputDelayRingBufferReadPos;
-    if (available < 0) {
-        available += mOutputDelayRingBufferSize;
-    }
-    if (available < 0) {
-        ALOGE("FATAL RING BUFFER ERROR");
-        return 0;
-    }
-    return available;
+    return mOutputDelayRingBufferFilled;
 }
 
-int32_t SoftAAC2::outputDelayRingBufferSamplesLeft() {
+int32_t SoftAAC2::outputDelayRingBufferSpaceLeft() {
     return mOutputDelayRingBufferSize - outputDelayRingBufferSamplesAvailable();
 }
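
The fill counter resolves the classic ambiguity of position-based ring-buffer
accounting (read == write can mean either full or empty) and lets overflow and
underrun be rejected before any copying. A simplified sketch of the same
bookkeeping, not the SoftAAC2 code itself:

    #include <cstdint>

    struct RingBufferSketch {
        explicit RingBufferSketch(int32_t size) : mSize(size) {}

        bool put(int32_t numSamples) {
            if (numSamples > mSize - mFilled) {
                return false;            // would overflow: reject up front
            }
            // ... copy samples and advance the write position here ...
            mFilled += numSamples;
            return true;
        }

        int32_t get(int32_t numSamples) {
            if (numSamples > mFilled) {
                return -1;               // would underrun: reject up front
            }
            // ... copy samples and advance the read position here ...
            mFilled -= numSamples;
            return numSamples;
        }

        int32_t available() const { return mFilled; }
        int32_t spaceLeft() const { return mSize - mFilled; }

        int32_t mSize;
        int32_t mFilled = 0;
    };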
 
@@ -512,6 +508,11 @@
             OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
 
             mEndOfInput = (inHeader->nFlags & OMX_BUFFERFLAG_EOS) != 0;
+
+            if (mInputBufferCount == 0 && !(inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
+                ALOGE("first buffer should have OMX_BUFFERFLAG_CODECCONFIG set");
+                inHeader->nFlags |= OMX_BUFFERFLAG_CODECCONFIG;
+            }
             if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) != 0) {
                 BufferInfo *inInfo = *inQueue.begin();
                 OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
@@ -650,7 +651,7 @@
 
             AAC_DECODER_ERROR decoderErr;
             do {
-                if (outputDelayRingBufferSamplesLeft() <
+                if (outputDelayRingBufferSpaceLeft() <
                         (mStreamInfo->frameSize * mStreamInfo->numChannels)) {
                     ALOGV("skipping decode: not enough space left in ringbuffer");
                     break;
@@ -705,7 +706,9 @@
                     }
 
                     // Discard input buffer.
-                    inHeader->nFilledLen = 0;
+                    if (inHeader) {
+                        inHeader->nFilledLen = 0;
+                    }
 
                     aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
 
@@ -736,7 +739,7 @@
                         notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
                         mOutputPortSettingsChange = AWAITING_DISABLED;
 
-                        if (inHeader->nFilledLen == 0) {
+                        if (inHeader && inHeader->nFilledLen == 0) {
                             inInfo->mOwnedByUs = false;
                             mInputBufferCount++;
                             inQueue.erase(inQueue.begin());
@@ -1022,6 +1025,7 @@
     mOutputDelayCompensated = 0;
     mOutputDelayRingBufferWritePos = 0;
     mOutputDelayRingBufferReadPos = 0;
+    mOutputDelayRingBufferFilled = 0;
     mEndOfInput = false;
     mEndOfOutput = false;
     mBufferTimestamps.clear();
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h
index 9fcb598..c3e4459 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.h
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h
@@ -85,10 +85,11 @@
     short *mOutputDelayRingBuffer;
     int32_t mOutputDelayRingBufferWritePos;
     int32_t mOutputDelayRingBufferReadPos;
+    int32_t mOutputDelayRingBufferFilled;
     bool outputDelayRingBufferPutSamples(INT_PCM *samples, int numSamples);
     int32_t outputDelayRingBufferGetSamples(INT_PCM *samples, int numSamples);
     int32_t outputDelayRingBufferSamplesAvailable();
-    int32_t outputDelayRingBufferSamplesLeft();
+    int32_t outputDelayRingBufferSpaceLeft();
 
     DISALLOW_EVIL_CONSTRUCTORS(SoftAAC2);
 };
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 3720085..a289637 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -63,6 +63,7 @@
       mSwapMask(0),
       mCheckBandwidthGeneration(0),
       mSwitchGeneration(0),
+      mSubtitleGeneration(0),
       mLastDequeuedTimeUs(0ll),
       mRealTimeBaseUs(0ll),
       mReconfigurationInProgress(false),
@@ -158,9 +159,16 @@
 
     // wait for counterpart
     sp<AnotherPacketSource> otherSource;
-    if (stream == STREAMTYPE_AUDIO && (mStreamMask & STREAMTYPE_VIDEO)) {
+    uint32_t mask = mNewStreamMask & mStreamMask;
+    uint32_t fetchersMask = 0;
+    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        uint32_t fetcherMask = mFetcherInfos.valueAt(i).mFetcher->getStreamTypeMask();
+        fetchersMask |= fetcherMask;
+    }
+    mask &= fetchersMask;
+    if (stream == STREAMTYPE_AUDIO && (mask & STREAMTYPE_VIDEO)) {
         otherSource = mPacketSources.valueFor(STREAMTYPE_VIDEO);
-    } else if (stream == STREAMTYPE_VIDEO && (mStreamMask & STREAMTYPE_AUDIO)) {
+    } else if (stream == STREAMTYPE_VIDEO && (mask & STREAMTYPE_AUDIO)) {
         otherSource = mPacketSources.valueFor(STREAMTYPE_AUDIO);
     }
     if (otherSource != NULL && !otherSource->hasBufferAvailable(&finalResult)) {
@@ -282,6 +290,11 @@
             mLastDequeuedTimeUs = timeUs;
             mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
         } else if (stream == STREAMTYPE_SUBTITLES) {
+            int32_t subtitleGeneration;
+            if ((*accessUnit)->meta()->findInt32("subtitleGeneration", &subtitleGeneration)
+                    && subtitleGeneration != mSubtitleGeneration) {
+                return -EAGAIN;
+            }
             (*accessUnit)->meta()->setInt32(
                     "trackIndex", mPlaylist->getSelectedIndex());
             (*accessUnit)->meta()->setInt64("baseUs", mRealTimeBaseUs);
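
The generation check reduces to a small predicate (a sketch; the real code
reads the value out of the access unit's meta):

    #include <cstdint>

    // True when the unit was queued before the latest track (re)selection;
    // the caller returns -EAGAIN and retries the dequeue.
    bool shouldDropSubtitleUnit(bool hasGeneration, int32_t unitGeneration,
                                int32_t currentSubtitleGeneration) {
        return hasGeneration && unitGeneration != currentSubtitleGeneration;
    }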
@@ -436,6 +449,23 @@
 
                     ALOGE("XXX Received error %d from PlaylistFetcher.", err);
 
+                    // handle EOS on subtitle tracks independently
+                    AString uri;
+                    if (err == ERROR_END_OF_STREAM && msg->findString("uri", &uri)) {
+                        ssize_t i = mFetcherInfos.indexOfKey(uri);
+                        if (i >= 0) {
+                            const sp<PlaylistFetcher> &fetcher = mFetcherInfos.valueAt(i).mFetcher;
+                            if (fetcher != NULL) {
+                                uint32_t type = fetcher->getStreamTypeMask();
+                                if (type == STREAMTYPE_SUBTITLES) {
+                                    mPacketSources.valueFor(
+                                            STREAMTYPE_SUBTITLES)->signalEOS(err);
+                                    break;
+                                }
+                            }
+                        }
+                    }
+
                     if (mInPreparationPhase) {
                         postPrepared(err);
                     }
@@ -735,7 +765,7 @@
     notify->setInt32("switchGeneration", mSwitchGeneration);
 
     FetcherInfo info;
-    info.mFetcher = new PlaylistFetcher(notify, this, uri);
+    info.mFetcher = new PlaylistFetcher(notify, this, uri, mSubtitleGeneration);
     info.mDurationUs = -1ll;
     info.mIsPrepared = false;
     info.mToBeRemoved = false;
@@ -1041,6 +1071,24 @@
     return index;
 }
 
+int64_t LiveSession::latestMediaSegmentStartTimeUs() {
+    sp<AMessage> audioMeta = mPacketSources.valueFor(STREAMTYPE_AUDIO)->getLatestDequeuedMeta();
+    int64_t minSegmentStartTimeUs = -1, videoSegmentStartTimeUs = -1;
+    if (audioMeta != NULL) {
+        audioMeta->findInt64("segmentStartTimeUs", &minSegmentStartTimeUs);
+    }
+
+    sp<AMessage> videoMeta = mPacketSources.valueFor(STREAMTYPE_VIDEO)->getLatestDequeuedMeta();
+    if (videoMeta != NULL
+            && videoMeta->findInt64("segmentStartTimeUs", &videoSegmentStartTimeUs)) {
+        if (minSegmentStartTimeUs < 0 || videoSegmentStartTimeUs < minSegmentStartTimeUs) {
+            minSegmentStartTimeUs = videoSegmentStartTimeUs;
+        }
+    }
+    return minSegmentStartTimeUs;
+}
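
The helper is a min-with-sentinel over the streams that reported a segment
start, where -1 means "unknown" (sketch):

    #include <cstdint>

    int64_t minSegmentStartUs(int64_t audioStartUs, int64_t videoStartUs) {
        int64_t minUs = audioStartUs;    // -1 if audio reported nothing
        if (videoStartUs >= 0 && (minUs < 0 || videoStartUs < minUs)) {
            minUs = videoStartUs;
        }
        return minUs;
    }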
+
 status_t LiveSession::onSeek(const sp<AMessage> &msg) {
     int64_t timeUs;
     CHECK(msg->findInt64("timeUs", &timeUs));
@@ -1093,6 +1141,11 @@
 }
 
 status_t LiveSession::selectTrack(size_t index, bool select) {
+    if (mPlaylist == NULL) {
+        return INVALID_OPERATION;
+    }
+
+    ++mSubtitleGeneration;
     status_t err = mPlaylist->selectTrack(index, select);
     if (err == OK) {
         sp<AMessage> msg = new AMessage(kWhatChangeConfiguration, id());
@@ -1375,6 +1428,10 @@
         int32_t discontinuitySeq = -1;
         sp<AnotherPacketSource> sources[kMaxStreams];
 
+        if (i == kSubtitleIndex) {
+            segmentStartTimeUs = latestMediaSegmentStartTimeUs();
+        }
+
         // TRICKY: looping from i as earlier streams are already removed from streamMask
         for (size_t j = i; j < kMaxStreams; ++j) {
             const AString &streamUri = switching ? mStreams[j].mNewUri : mStreams[j].mUri;
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 6be86cf..7aacca6 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -189,6 +189,7 @@
 
     int32_t mCheckBandwidthGeneration;
     int32_t mSwitchGeneration;
+    int32_t mSubtitleGeneration;
 
     size_t mContinuationCounter;
     sp<AMessage> mContinuation;
@@ -240,6 +241,7 @@
             const char *url, uint8_t *curPlaylistHash, bool *unchanged);
 
     size_t getBandwidthIndex();
+    int64_t latestMediaSegmentStartTimeUs();
 
     static int SortByBandwidth(const BandwidthItem *, const BandwidthItem *);
     static StreamType indexToType(int idx);
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 1166762..30fa868 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -55,7 +55,8 @@
 PlaylistFetcher::PlaylistFetcher(
         const sp<AMessage> &notify,
         const sp<LiveSession> &session,
-        const char *uri)
+        const char *uri,
+        int32_t subtitleGeneration)
     : mNotify(notify),
       mStartTimeUsNotify(notify->dup()),
       mSession(session),
@@ -73,6 +74,7 @@
       mPrepared(false),
       mNextPTSTimeUs(-1ll),
       mMonitorQueueGeneration(0),
+      mSubtitleGeneration(subtitleGeneration),
       mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY),
       mFirstPTSValid(false),
       mAbsoluteTimeAnchorUs(0ll),
@@ -1009,7 +1011,16 @@
 
     // bulk extract non-ts files
     if (tsBuffer == NULL) {
-      err = extractAndQueueAccessUnits(buffer, itemMeta);
+        err = extractAndQueueAccessUnits(buffer, itemMeta);
+        if (err == -EAGAIN) {
+            // starting sequence number too low/high
+            postMonitorQueue();
+            return;
+        } else if (err == ERROR_OUT_OF_RANGE) {
+            // reached stopping point
+            stopAsync(/* clear = */false);
+            return;
+        }
     }
 
     if (err != OK) {
@@ -1398,6 +1409,7 @@
         buffer->meta()->setInt64("durationUs", durationUs);
         buffer->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
         buffer->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
+        buffer->meta()->setInt32("subtitleGeneration", mSubtitleGeneration);
 
         packetSource->queueAccessUnit(buffer);
         return OK;
@@ -1552,14 +1564,52 @@
             if (startTimeUs < mStartTimeUs) {
                 continue;
             }
+
+            if (mStartTimeUsNotify != NULL) {
+                int32_t targetDurationSecs;
+                CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
+                int64_t targetDurationUs = targetDurationSecs * 1000000ll;
+
+                // Duplicated logic from how we handle .ts playlists.
+                if (mStartup && mSegmentStartTimeUs >= 0
+                        && timeUs - mStartTimeUs > targetDurationUs) {
+                    int32_t newSeqNumber = getSeqNumberWithAnchorTime(timeUs);
+                    if (newSeqNumber >= mSeqNumber) {
+                        --mSeqNumber;
+                    } else {
+                        mSeqNumber = newSeqNumber;
+                    }
+                    return -EAGAIN;
+                }
+
+                mStartTimeUsNotify->setInt64("timeUsAudio", timeUs);
+                mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq);
+                mStartTimeUsNotify->setInt32("streamMask", LiveSession::STREAMTYPE_AUDIO);
+                mStartTimeUsNotify->post();
+                mStartTimeUsNotify.clear();
+            }
+        }
+
+        if (mStopParams != NULL) {
+            // Queue discontinuity in original stream.
+            int32_t discontinuitySeq;
+            int64_t stopTimeUs;
+            if (!mStopParams->findInt32("discontinuitySeq", &discontinuitySeq)
+                    || discontinuitySeq > mDiscontinuitySeq
+                    || !mStopParams->findInt64("timeUsAudio", &stopTimeUs)
+                    || (discontinuitySeq == mDiscontinuitySeq && unitTimeUs >= stopTimeUs)) {
+                packetSource->queueAccessUnit(mSession->createFormatChangeBuffer());
+                mStreamTypeMask = 0;
+                mPacketSources.clear();
+                return ERROR_OUT_OF_RANGE;
+            }
         }
 
         sp<ABuffer> unit = new ABuffer(aac_frame_length);
         memcpy(unit->data(), adtsHeader, aac_frame_length);
 
         unit->meta()->setInt64("timeUs", unitTimeUs);
-        unit->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
-        unit->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
+        setAccessUnitProperties(unit, packetSource);
         packetSource->queueAccessUnit(unit);
     }
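
The stopping-point test duplicated from the .ts path can be isolated as a
predicate; the "have" flags model whether mStopParams carried each field
(a sketch mirroring the condition above):

    #include <cstdint>

    bool reachedStoppingPoint(bool haveStopSeq, int32_t stopSeq,
                              bool haveStopTimeUs, int64_t stopTimeUs,
                              int32_t currentSeq, int64_t unitTimeUs) {
        // Missing stop parameters, a stop sequence beyond the current one, or
        // a unit at or past the stop time within the stop sequence: stop here.
        return !haveStopSeq
                || stopSeq > currentSeq
                || !haveStopTimeUs
                || (stopSeq == currentSeq && unitTimeUs >= stopTimeUs);
    }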
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index 4ba37fa..78c358f 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -49,7 +49,8 @@
     PlaylistFetcher(
             const sp<AMessage> &notify,
             const sp<LiveSession> &session,
-            const char *uri);
+            const char *uri,
+            int32_t subtitleGeneration);
 
     sp<DataSource> getDataSource();
 
@@ -69,6 +70,10 @@
 
     void resumeUntilAsync(const sp<AMessage> &params);
 
+    uint32_t getStreamTypeMask() const {
+        return mStreamTypeMask;
+    }
+
 protected:
     virtual ~PlaylistFetcher();
     virtual void onMessageReceived(const sp<AMessage> &msg);
@@ -129,6 +134,7 @@
     int64_t mNextPTSTimeUs;
 
     int32_t mMonitorQueueGeneration;
+    const int32_t mSubtitleGeneration;
 
     enum RefreshState {
         INITIAL_MINIMUM_RELOAD_DELAY,
diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h
index 5db4b4b..4252706 100644
--- a/media/libstagefright/include/NuCachedSource2.h
+++ b/media/libstagefright/include/NuCachedSource2.h
@@ -37,6 +37,8 @@
 
     virtual ssize_t readAt(off64_t offset, void *data, size_t size);
 
+    virtual void disconnect();
+
     virtual status_t getSize(off64_t *size);
     virtual uint32_t flags();
 
@@ -103,6 +105,7 @@
     off64_t mLastAccessPos;
     sp<AMessage> mAsyncResult;
     bool mFetching;
+    bool mDisconnecting;
     int64_t mLastFetchTimeUs;
 
     int32_t mNumRetriesLeft;
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3d17c89..818bb05 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3991,8 +3991,6 @@
             minFrames = 1;
         }
 
-        ALOGI("prepareTracks_l minFrames %d state %d frames ready %d, ",
-              minFrames, track->mState, track->framesReady());
         if ((track->framesReady() >= minFrames) && track->isReady() && !track->isPaused() &&
                 !track->isStopping_2() && !track->isStopped())
         {
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index d5f6c1e..95ac070 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -2667,59 +2667,69 @@
                 continue;
             }
 
+            if ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
+                continue;
+            }
             audio_devices_t profileType = outProfile->mSupportedDevices.types();
             if ((profileType & mDefaultOutputDevice->mDeviceType) != AUDIO_DEVICE_NONE) {
                 profileType = mDefaultOutputDevice->mDeviceType;
             } else {
-                profileType = outProfile->mSupportedDevices[0]->mDeviceType;
-            }
-            if ((profileType & outputDeviceTypes) &&
-                    ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0)) {
-                sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(outProfile);
-
-                outputDesc->mDevice = profileType;
-                audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-                config.sample_rate = outputDesc->mSamplingRate;
-                config.channel_mask = outputDesc->mChannelMask;
-                config.format = outputDesc->mFormat;
-                audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-                status_t status = mpClientInterface->openOutput(outProfile->mModule->mHandle,
-                                                                &output,
-                                                                &config,
-                                                                &outputDesc->mDevice,
-                                                                String8(""),
-                                                                &outputDesc->mLatency,
-                                                                outputDesc->mFlags);
-
-                if (status != NO_ERROR) {
-                    ALOGW("Cannot open output stream for device %08x on hw module %s",
-                          outputDesc->mDevice,
-                          mHwModules[i]->mName);
-                } else {
-                    outputDesc->mSamplingRate = config.sample_rate;
-                    outputDesc->mChannelMask = config.channel_mask;
-                    outputDesc->mFormat = config.format;
-
-                    for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
-                        audio_devices_t type = outProfile->mSupportedDevices[k]->mDeviceType;
-                        ssize_t index =
-                                mAvailableOutputDevices.indexOf(outProfile->mSupportedDevices[k]);
-                        // give a valid ID to an attached device once confirmed it is reachable
-                        if ((index >= 0) && (mAvailableOutputDevices[index]->mId == 0)) {
-                            mAvailableOutputDevices[index]->mId = nextUniqueId();
-                            mAvailableOutputDevices[index]->mModule = mHwModules[i];
-                        }
+                // choose the first device present in mSupportedDevices that is
+                // also part of outputDeviceTypes
+                for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
+                    profileType = outProfile->mSupportedDevices[k]->mDeviceType;
+                    if ((profileType & outputDeviceTypes) != 0) {
+                        break;
                     }
-                    if (mPrimaryOutput == 0 &&
-                            outProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
-                        mPrimaryOutput = output;
-                    }
-                    addOutput(output, outputDesc);
-                    setOutputDevice(output,
-                                    outputDesc->mDevice,
-                                    true);
                 }
             }
+            if ((profileType & outputDeviceTypes) == 0) {
+                continue;
+            }
+            sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(outProfile);
+
+            outputDesc->mDevice = profileType;
+            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+            config.sample_rate = outputDesc->mSamplingRate;
+            config.channel_mask = outputDesc->mChannelMask;
+            config.format = outputDesc->mFormat;
+            audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+            status_t status = mpClientInterface->openOutput(outProfile->mModule->mHandle,
+                                                            &output,
+                                                            &config,
+                                                            &outputDesc->mDevice,
+                                                            String8(""),
+                                                            &outputDesc->mLatency,
+                                                            outputDesc->mFlags);
+
+            if (status != NO_ERROR) {
+                ALOGW("Cannot open output stream for device %08x on hw module %s",
+                      outputDesc->mDevice,
+                      mHwModules[i]->mName);
+            } else {
+                outputDesc->mSamplingRate = config.sample_rate;
+                outputDesc->mChannelMask = config.channel_mask;
+                outputDesc->mFormat = config.format;
+
+                for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
+                    audio_devices_t type = outProfile->mSupportedDevices[k]->mDeviceType;
+                    ssize_t index =
+                            mAvailableOutputDevices.indexOf(outProfile->mSupportedDevices[k]);
+                    // give a valid ID to an attached device once confirmed it is reachable
+                    if ((index >= 0) && (mAvailableOutputDevices[index]->mId == 0)) {
+                        mAvailableOutputDevices[index]->mId = nextUniqueId();
+                        mAvailableOutputDevices[index]->mModule = mHwModules[i];
+                    }
+                }
+                if (mPrimaryOutput == 0 &&
+                        outProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
+                    mPrimaryOutput = output;
+                }
+                addOutput(output, outputDesc);
+                setOutputDevice(output,
+                                outputDesc->mDevice,
+                                true);
+            }
         }
         // open input streams needed to access attached devices to validate
         // mAvailableInputDevices list
@@ -2731,45 +2741,53 @@
                 ALOGW("Input profile contains no device on module %s", mHwModules[i]->mName);
                 continue;
             }
-
-            audio_devices_t profileType = inProfile->mSupportedDevices[0]->mDeviceType;
-            if (profileType & inputDeviceTypes) {
-                sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(inProfile);
-
-                inputDesc->mInputSource = AUDIO_SOURCE_MIC;
-                inputDesc->mDevice = profileType;
-
-                audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-                config.sample_rate = inputDesc->mSamplingRate;
-                config.channel_mask = inputDesc->mChannelMask;
-                config.format = inputDesc->mFormat;
-                audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-                status_t status = mpClientInterface->openInput(inProfile->mModule->mHandle,
-                                                               &input,
-                                                               &config,
-                                                               &inputDesc->mDevice,
-                                                               String8(""),
-                                                               AUDIO_SOURCE_MIC,
-                                                               AUDIO_INPUT_FLAG_NONE);
-
-                if (status == NO_ERROR) {
-                    for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
-                        audio_devices_t type = inProfile->mSupportedDevices[k]->mDeviceType;
-                        ssize_t index =
-                                mAvailableInputDevices.indexOf(inProfile->mSupportedDevices[k]);
-                        // give a valid ID to an attached device once confirmed it is reachable
-                        if ((index >= 0) && (mAvailableInputDevices[index]->mId == 0)) {
-                            mAvailableInputDevices[index]->mId = nextUniqueId();
-                            mAvailableInputDevices[index]->mModule = mHwModules[i];
-                        }
-                    }
-                    mpClientInterface->closeInput(input);
-                } else {
-                    ALOGW("Cannot open input stream for device %08x on hw module %s",
-                          inputDesc->mDevice,
-                          mHwModules[i]->mName);
+            // choose the first device present in mSupportedDevices that is
+            // also part of inputDeviceTypes
+            audio_devices_t profileType = AUDIO_DEVICE_NONE;
+            for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
+                profileType = inProfile->mSupportedDevices[k]->mDeviceType;
+                if (profileType & inputDeviceTypes) {
+                    break;
                 }
             }
+            if ((profileType & inputDeviceTypes) == 0) {
+                continue;
+            }
+            sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(inProfile);
+
+            inputDesc->mInputSource = AUDIO_SOURCE_MIC;
+            inputDesc->mDevice = profileType;
+
+            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+            config.sample_rate = inputDesc->mSamplingRate;
+            config.channel_mask = inputDesc->mChannelMask;
+            config.format = inputDesc->mFormat;
+            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+            status_t status = mpClientInterface->openInput(inProfile->mModule->mHandle,
+                                                           &input,
+                                                           &config,
+                                                           &inputDesc->mDevice,
+                                                           String8(""),
+                                                           AUDIO_SOURCE_MIC,
+                                                           AUDIO_INPUT_FLAG_NONE);
+
+            if (status == NO_ERROR) {
+                for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
+                    audio_devices_t type = inProfile->mSupportedDevices[k]->mDeviceType;
+                    ssize_t index =
+                            mAvailableInputDevices.indexOf(inProfile->mSupportedDevices[k]);
+                    // give a valid ID to an attached device once confirmed it is reachable
+                    if ((index >= 0) && (mAvailableInputDevices[index]->mId == 0)) {
+                        mAvailableInputDevices[index]->mId = nextUniqueId();
+                        mAvailableInputDevices[index]->mModule = mHwModules[i];
+                    }
+                }
+                mpClientInterface->closeInput(input);
+            } else {
+                ALOGW("Cannot open input stream for device %08x on hw module %s",
+                      inputDesc->mDevice,
+                      mHwModules[i]->mName);
+            }
         }
     }
     // make sure all attached devices have been allocated a unique ID
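
Both loops now follow the same selection pattern, sketched here with plain
bitmasks standing in for audio_devices_t (the output loop also prefers the
default device; the input loop uses only the first-match step):

    #include <cstdint>
    #include <vector>

    uint32_t pickProfileDeviceType(const std::vector<uint32_t> &supportedTypes,
                                   uint32_t attachedTypes,
                                   uint32_t defaultType) {
        uint32_t combined = 0;
        for (uint32_t t : supportedTypes) {
            combined |= t;
        }
        if ((combined & defaultType) != 0) {
            return defaultType;          // profile reaches the default device
        }
        for (uint32_t t : supportedTypes) {
            if ((t & attachedTypes) != 0) {
                return t;                // first supported type that is attached
            }
        }
        return 0;                        // AUDIO_DEVICE_NONE: skip this profile
    }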
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index de31e23..f110b66 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -573,6 +573,11 @@
                     continue;
                 }
                 uint8_t afMode = entry.data.u8[0];
+                if (afMode == ANDROID_CONTROL_AF_MODE_OFF) {
+                    // Skip all ZSL buffers for manual AF mode, as we don't
+                    // really know the AF state.
+                    continue;
+                }
 
                 // Check AF state if device has focuser and focus mode isn't fixed
                 if (mHasFocuser && !isFixedFocusMode(afMode)) {
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 80c797a..e3301aa 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -512,12 +512,24 @@
 
 status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) {
     String8 result;
-    result.appendFormat("CameraDeviceClient[%d] (%p) PID: %d, dump:\n",
+    result.appendFormat("CameraDeviceClient[%d] (%p) dump:\n",
             mCameraId,
-            getRemoteCallback()->asBinder().get(),
-            mClientPid);
-    result.append("  State: ");
+            getRemoteCallback()->asBinder().get());
+    result.appendFormat("  Current client: %s (PID %d, UID %u)\n",
+            String8(mClientPackageName).string(),
+            mClientPid, mClientUid);
 
+    result.append("  State:\n");
+    result.appendFormat("    Request ID counter: %d\n", mRequestIdCounter);
+    if (!mStreamMap.isEmpty()) {
+        result.append("    Current stream IDs:\n");
+        for (size_t i = 0; i < mStreamMap.size(); i++) {
+            result.appendFormat("      Stream %d\n", mStreamMap.valueAt(i));
+        }
+    } else {
+        result.append("    No streams configured.\n");
+    }
+    write(fd, result.string(), result.size());
     // TODO: print dynamic/request section from most recent requests
     mFrameProcessor->dump(fd, args);
 
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
index f8823a3..2ea460f 100644
--- a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
+++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
@@ -336,11 +336,11 @@
             mCameraId,
             getRemoteCallback()->asBinder().get(),
             mClientPid);
-    result.append("  State: ");
+    result.append("  State:\n");
+    write(fd, result.string(), result.size());
 
     // TODO: print dynamic/request section from most recent requests
     mFrameProcessor->dump(fd, args);
-
     return dumpDevice(fd, args);
 }
 
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 24d173c..d6db151 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -57,8 +57,10 @@
         mSharedCameraCallbacks(remoteCallback),
         mDeviceVersion(cameraService->getDeviceVersion(cameraId))
 {
-    ALOGI("Camera %d: Opened", cameraId);
+    ALOGI("Camera %d: Opened. Client: %s (PID %d, UID %d)", cameraId,
+            String8(clientPackageName).string(), clientPid, clientUid);
 
+    mInitialClientPid = clientPid;
     mDevice = CameraDeviceFactory::createDevice(cameraId);
     LOG_ALWAYS_FATAL_IF(mDevice == 0, "Device should never be NULL here.");
 }
@@ -114,7 +116,10 @@
 
     disconnect();
 
-    ALOGI("Closed Camera %d", TClientBase::mCameraId);
+    ALOGI("Closed Camera %d. Client was: %s (PID %d, UID %u)",
+            TClientBase::mCameraId,
+            String8(TClientBase::mClientPackageName).string(),
+            mInitialClientPid, TClientBase::mClientUid);
 }
 
 template <typename TClientBase>
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index f57d204..d198e4e 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -107,6 +107,9 @@
 
 protected:
 
+    // The PID provided in the constructor call
+    pid_t mInitialClientPid;
+
     virtual sp<IBinder> asBinderWrapper() {
         return IInterface::asBinder();
     }