Merge "fix soft renderer rotation" into mnc-dev
diff --git a/camera/camera2/ICameraDeviceUser.cpp b/camera/camera2/ICameraDeviceUser.cpp
index ffe974b..d2dc200 100644
--- a/camera/camera2/ICameraDeviceUser.cpp
+++ b/camera/camera2/ICameraDeviceUser.cpp
@@ -48,7 +48,8 @@
     GET_CAMERA_INFO,
     WAIT_UNTIL_IDLE,
     FLUSH,
-    PREPARE
+    PREPARE,
+    TEAR_DOWN
 };
 
 namespace {
@@ -365,6 +366,20 @@
         return reply.readInt32();
     }
 
+    virtual status_t tearDown(int streamId)
+    {
+        ALOGV("tearDown");
+        Parcel data, reply;
+
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        data.writeInt32(streamId);
+
+        remote()->transact(TEAR_DOWN, data, &reply);
+
+        reply.readExceptionCode();
+        return reply.readInt32();
+    }
+
 private:
 
 
@@ -570,6 +585,13 @@
             reply->writeInt32(prepare(streamId));
             return NO_ERROR;
         } break;
+        case TEAR_DOWN: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+            int streamId = data.readInt32();
+            reply->writeNoException();
+            reply->writeInt32(tearDown(streamId));
+            return NO_ERROR;
+        } break;
 
         default:
             return BBinder::onTransact(code, data, reply, flags);
diff --git a/include/camera/camera2/ICameraDeviceUser.h b/include/camera/camera2/ICameraDeviceUser.h
index b3dd140..a7bf8ab 100644
--- a/include/camera/camera2/ICameraDeviceUser.h
+++ b/include/camera/camera2/ICameraDeviceUser.h
@@ -138,6 +138,12 @@
      * Preallocate buffers for a given output stream asynchronously.
      */
     virtual status_t        prepare(int streamId) = 0;
+
+    /**
+     * Free all unused buffers for a given output stream.
+     */
+    virtual status_t        tearDown(int streamId) = 0;
+
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/media/IResourceManagerService.h b/include/media/IResourceManagerService.h
index 067392c..1e4f6de 100644
--- a/include/media/IResourceManagerService.h
+++ b/include/media/IResourceManagerService.h
@@ -43,7 +43,7 @@
             const sp<IResourceManagerClient> client,
             const Vector<MediaResource> &resources) = 0;
 
-    virtual void removeResource(int64_t clientId) = 0;
+    virtual void removeResource(int pid, int64_t clientId) = 0;
 
     virtual bool reclaimResource(
             int callingPid,
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 50cf371..3074910 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -37,7 +37,8 @@
             audio_source_t inputSource,
             const String16 &opPackageName,
             uint32_t sampleRate,
-            uint32_t channels = 1);
+            uint32_t channels,
+            uint32_t outSampleRate = 0);
 
     status_t initCheck() const;
 
@@ -78,11 +79,13 @@
     status_t mInitCheck;
     bool mStarted;
     int32_t mSampleRate;
+    int32_t mOutSampleRate;
 
     bool mTrackMaxAmplitude;
     int64_t mStartTimeUs;
     int16_t mMaxAmplitude;
     int64_t mPrevSampleTimeUs;
+    int64_t mFirstSampleTimeUs;
     int64_t mInitialReadTimeUs;
     int64_t mNumFramesReceived;
     int64_t mNumClientOwnedBuffers;
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 720778b..b621b9c 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -260,18 +260,18 @@
         virtual void binderDied(const wp<IBinder>& /*who*/);
 
         void addResource(
-                int pid,
                 int64_t clientId,
                 const sp<IResourceManagerClient> client,
                 const Vector<MediaResource> &resources);
 
         void removeResource(int64_t clientId);
 
-        bool reclaimResource(int callingPid, const Vector<MediaResource> &resources);
+        bool reclaimResource(const Vector<MediaResource> &resources);
 
     private:
         Mutex mLock;
         sp<IResourceManagerService> mService;
+        int mPid;
     };
 
     State mState;
diff --git a/include/media/stagefright/MediaSync.h b/include/media/stagefright/MediaSync.h
index 1eef211..4b5cd05 100644
--- a/include/media/stagefright/MediaSync.h
+++ b/include/media/stagefright/MediaSync.h
@@ -37,6 +37,7 @@
 class IGraphicBufferConsumer;
 class IGraphicBufferProducer;
 struct MediaClock;
+struct VideoFrameScheduler;
 
 // MediaSync manages media playback and its synchronization to a media clock
 // source. It can be also used for video-only playback.
@@ -103,6 +104,9 @@
     // MediaClock::getMediaTime() and MediaClock::getRealTimeFor().
     sp<const MediaClock> getMediaClock();
 
+    // Flush mediasync
+    void flush();
+
     // Set the video frame rate hint - this is used by the video FrameScheduler
     status_t setVideoFrameRateHint(float rate);
 
@@ -131,11 +135,10 @@
 
 private:
     enum {
-        kWhatDrainVideo = 'dVid',
+        kWhatDrainVideo          = 'dVid',
+        kWhatCheckFrameAvailable = 'cFrA',
     };
 
-    static const int MAX_OUTSTANDING_BUFFERS = 2;
-
     // This is a thin wrapper class that lets us listen to
     // IConsumerListener::onFrameAvailable from mInput.
     class InputListener : public BnConsumerListener,
@@ -194,6 +197,8 @@
     sp<IGraphicBufferConsumer> mInput;
     sp<IGraphicBufferProducer> mOutput;
     int mUsageFlagsFromOutput;
+    uint32_t mMaxAcquiredBufferCount; // max acquired buffer count
+    bool mReturnPendingInputFrame;    // set while we are pending before acquiring an input frame
 
     sp<AudioTrack> mAudioTrack;
     uint32_t mNativeSampleRateInHz;
@@ -202,6 +207,7 @@
 
     int64_t mNextBufferItemMediaUs;
     List<BufferItem> mBufferItems;
+    sp<VideoFrameScheduler> mFrameScheduler;
 
     // Keep track of buffers received from |mInput|. This is needed because
     // it's possible the consumer of |mOutput| could return a different
@@ -242,8 +248,9 @@
     // onBufferReleasedByOutput releases a buffer back to the input.
     void onFrameAvailableFromInput();
 
-    // Send |bufferItem| to the output for rendering.
-    void renderOneBufferItem_l(const BufferItem &bufferItem);
+    // Send |bufferItem| to the output for rendering. If this is not the only
+    // buffer sent for rendering, check for any dropped frames in |checkInUs| us.
+    void renderOneBufferItem_l(const BufferItem &bufferItem, int64_t checkInUs);
 
     // This implements the onBufferReleased callback from IProducerListener.
     // It gets called from an OutputListener.
diff --git a/media/libmediaplayerservice/VideoFrameScheduler.h b/include/media/stagefright/VideoFrameScheduler.h
similarity index 93%
rename from media/libmediaplayerservice/VideoFrameScheduler.h
rename to include/media/stagefright/VideoFrameScheduler.h
index b1765c9..9d97dfd 100644
--- a/media/libmediaplayerservice/VideoFrameScheduler.h
+++ b/include/media/stagefright/VideoFrameScheduler.h
@@ -39,6 +39,9 @@
     // returns the vsync period for the main display
     nsecs_t getVsyncPeriod();
 
+    // returns the current frames-per-second, or 0.f if not primed
+    float getFrameRate();
+
     void release();
 
     static const size_t kHistorySize = 8;
@@ -54,8 +57,9 @@
         void reset(float fps = -1);
         // keep current estimate, but restart phase
         void restart();
-        // returns period
+        // returns period or 0 if not yet primed
         nsecs_t addSample(nsecs_t time);
+        nsecs_t getPeriod() const;
 
     private:
         nsecs_t mPeriod;
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 1d7aed2..6a51a76 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -374,6 +374,9 @@
     size_t increment = mFrameCountP2 << 1;
     size_t mask = increment - 1;
     audio_track_cblk_t* cblk = mCblk;
+    // mFlush is 32 bits concatenated as [ flush_counter ] [ newfront_offset ]
+    // Should newFlush = cblk->u.mStreaming.mRear?  Only problem is
+    // if you want to flush twice to the same rear location after a 32 bit wrap.
     int32_t newFlush = (cblk->u.mStreaming.mRear & mask) |
                         ((cblk->u.mStreaming.mFlush & ~mask) + increment);
     android_atomic_release_store(newFlush, &cblk->u.mStreaming.mFlush);
@@ -613,9 +616,18 @@
         front = cblk->u.mStreaming.mFront;
         if (flush != mFlush) {
             // effectively obtain then release whatever is in the buffer
-            size_t mask = (mFrameCountP2 << 1) - 1;
+            const size_t overflowBit = mFrameCountP2 << 1;
+            const size_t mask = overflowBit - 1;
             int32_t newFront = (front & ~mask) | (flush & mask);
             ssize_t filled = rear - newFront;
+            if (filled >= (ssize_t)overflowBit) {
+                // front and rear offsets span the overflow bit of the p2 mask
+                // so rebasing newFront on the front offset is off by the overflow bit.
+                // adjust newFront to match rear offset.
+                ALOGV("flush wrap: filled %zx >= overflowBit %zx", filled, overflowBit);
+                newFront += overflowBit;
+                filled -= overflowBit;
+            }
             // Rather than shutting down on a corrupt flush, just treat it as a full flush
             if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
                 ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, "
diff --git a/media/libmedia/IResourceManagerService.cpp b/media/libmedia/IResourceManagerService.cpp
index 6902e99..4598686 100644
--- a/media/libmedia/IResourceManagerService.cpp
+++ b/media/libmedia/IResourceManagerService.cpp
@@ -85,9 +85,10 @@
         remote()->transact(ADD_RESOURCE, data, &reply);
     }
 
-    virtual void removeResource(int64_t clientId) {
+    virtual void removeResource(int pid, int64_t clientId) {
         Parcel data, reply;
         data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
+        data.writeInt32(pid);
         data.writeInt64(clientId);
 
         remote()->transact(REMOVE_RESOURCE, data, &reply);
@@ -139,8 +140,9 @@
 
         case REMOVE_RESOURCE: {
             CHECK_INTERFACE(IResourceManagerService, data, reply);
+            int pid = data.readInt32();
             int64_t clientId = data.readInt64();
-            removeResource(clientId);
+            removeResource(pid, clientId);
             return NO_ERROR;
         } break;
 
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 7f0cca2..4d1b587 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -21,7 +21,6 @@
     StagefrightPlayer.cpp       \
     StagefrightRecorder.cpp     \
     TestPlayerStub.cpp          \
-    VideoFrameScheduler.cpp     \
 
 LOCAL_SHARED_LIBRARIES :=       \
     libbinder                   \
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 98abe9c..e521fae 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -75,8 +75,6 @@
       mOutputFd(-1),
       mAudioSource(AUDIO_SOURCE_CNT),
       mVideoSource(VIDEO_SOURCE_LIST_END),
-      mCaptureTimeLapse(false),
-      mCaptureFps(0.0f),
       mStarted(false) {
 
     ALOGV("Constructor");
@@ -567,32 +565,32 @@
     return OK;
 }
 
-status_t StagefrightRecorder::setParamTimeLapseEnable(int32_t timeLapseEnable) {
-    ALOGV("setParamTimeLapseEnable: %d", timeLapseEnable);
+status_t StagefrightRecorder::setParamCaptureFpsEnable(int32_t captureFpsEnable) {
+    ALOGV("setParamCaptureFpsEnable: %d", captureFpsEnable);
 
-    if(timeLapseEnable == 0) {
-        mCaptureTimeLapse = false;
-    } else if (timeLapseEnable == 1) {
-        mCaptureTimeLapse = true;
+    if(captureFpsEnable == 0) {
+        mCaptureFpsEnable = false;
+    } else if (captureFpsEnable == 1) {
+        mCaptureFpsEnable = true;
     } else {
         return BAD_VALUE;
     }
     return OK;
 }
 
-status_t StagefrightRecorder::setParamTimeLapseFps(float fps) {
-    ALOGV("setParamTimeLapseFps: %.2f", fps);
+status_t StagefrightRecorder::setParamCaptureFps(float fps) {
+    ALOGV("setParamCaptureFps: %.2f", fps);
 
     int64_t timeUs = (int64_t) (1000000.0 / fps + 0.5f);
 
     // Not allowing time more than a day
     if (timeUs <= 0 || timeUs > 86400*1E6) {
-        ALOGE("Time between time lapse frame capture (%lld) is out of range [0, 1 Day]", timeUs);
+        ALOGE("Time between frame capture (%lld) is out of range [0, 1 Day]", timeUs);
         return BAD_VALUE;
     }
 
     mCaptureFps = fps;
-    mTimeBetweenTimeLapseFrameCaptureUs = timeUs;
+    mTimeBetweenCaptureUs = timeUs;
     return OK;
 }
 
@@ -715,14 +713,14 @@
             return setParamVideoTimeScale(timeScale);
         }
     } else if (key == "time-lapse-enable") {
-        int32_t timeLapseEnable;
-        if (safe_strtoi32(value.string(), &timeLapseEnable)) {
-            return setParamTimeLapseEnable(timeLapseEnable);
+        int32_t captureFpsEnable;
+        if (safe_strtoi32(value.string(), &captureFpsEnable)) {
+            return setParamCaptureFpsEnable(captureFpsEnable);
         }
     } else if (key == "time-lapse-fps") {
         float fps;
         if (safe_strtof(value.string(), &fps)) {
-            return setParamTimeLapseFps(fps);
+            return setParamCaptureFps(fps);
         }
     } else {
         ALOGE("setParameter: failed to find key %s", key.string());
@@ -910,12 +908,32 @@
 }
 
 sp<MediaSource> StagefrightRecorder::createAudioSource() {
+    int32_t sourceSampleRate = mSampleRate;
+
+    if (mCaptureFpsEnable && mCaptureFps >= mFrameRate) {
+        // Upscale the sample rate for slow motion recording.
+        // Fail audio source creation if source sample rate is too high, as it could
+        // cause out-of-memory due to large input buffer size. And audio recording
+        // probably doesn't make sense in the scenario, since the slow-down factor
+        // is probably huge (eg. mSampleRate=48K, mCaptureFps=240, mFrameRate=1).
+        const static int32_t SAMPLE_RATE_HZ_MAX = 192000;
+        sourceSampleRate =
+                (mSampleRate * mCaptureFps + mFrameRate / 2) / mFrameRate;
+        if (sourceSampleRate < mSampleRate || sourceSampleRate > SAMPLE_RATE_HZ_MAX) {
+            ALOGE("source sample rate out of range! "
+                    "(mSampleRate %d, mCaptureFps %.2f, mFrameRate %d)",
+                    mSampleRate, mCaptureFps, mFrameRate);
+            return NULL;
+        }
+    }
+
     sp<AudioSource> audioSource =
         new AudioSource(
                 mAudioSource,
                 mOpPackageName,
-                mSampleRate,
-                mAudioChannels);
+                sourceSampleRate,
+                mAudioChannels,
+                mSampleRate);
 
     status_t err = audioSource->initCheck();
 
@@ -1207,7 +1225,7 @@
              mVideoEncoder == VIDEO_ENCODER_H264 ? MEDIA_MIMETYPE_VIDEO_AVC : ""),
             false /* decoder */, true /* hwCodec */, &codecs);
 
-    if (!mCaptureTimeLapse) {
+    if (!mCaptureFpsEnable) {
         // Dont clip for time lapse capture as encoder will have enough
         // time to encode because of slow capture rate of time lapse.
         clipVideoBitRate();
@@ -1420,17 +1438,17 @@
     Size videoSize;
     videoSize.width = mVideoWidth;
     videoSize.height = mVideoHeight;
-    if (mCaptureTimeLapse) {
-        if (mTimeBetweenTimeLapseFrameCaptureUs < 0) {
+    if (mCaptureFpsEnable) {
+        if (mTimeBetweenCaptureUs < 0) {
             ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
-                mTimeBetweenTimeLapseFrameCaptureUs);
+                mTimeBetweenCaptureUs);
             return BAD_VALUE;
         }
 
         mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
                 mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
                 videoSize, mFrameRate, mPreviewSurface,
-                mTimeBetweenTimeLapseFrameCaptureUs);
+                mTimeBetweenCaptureUs);
         *cameraSource = mCameraSourceTimeLapse;
     } else {
         *cameraSource = CameraSource::CreateFromCamera(
@@ -1521,14 +1539,13 @@
         format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
 
         // set up time lapse/slow motion for surface source
-        if (mCaptureTimeLapse) {
-            if (mTimeBetweenTimeLapseFrameCaptureUs <= 0) {
-                ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
-                    mTimeBetweenTimeLapseFrameCaptureUs);
+        if (mCaptureFpsEnable) {
+            if (mTimeBetweenCaptureUs <= 0) {
+                ALOGE("Invalid mTimeBetweenCaptureUs value: %lld",
+                        mTimeBetweenCaptureUs);
                 return BAD_VALUE;
             }
-            format->setInt64("time-lapse",
-                    mTimeBetweenTimeLapseFrameCaptureUs);
+            format->setInt64("time-lapse", mTimeBetweenCaptureUs);
         }
     }
 
@@ -1547,7 +1564,7 @@
     }
 
     format->setInt32("priority", 0 /* realtime */);
-    if (mCaptureTimeLapse) {
+    if (mCaptureFpsEnable) {
         format->setFloat("operating-rate", mCaptureFps);
     }
 
@@ -1647,13 +1664,15 @@
         // This help make sure that the "recoding" sound is suppressed for
         // camcorder applications in the recorded files.
         // TODO Audio source is currently unsupported for webm output; vorbis encoder needed.
-        if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
+        // disable audio for time lapse recording
+        bool disableAudio = mCaptureFpsEnable && mCaptureFps < mFrameRate;
+        if (!disableAudio && mAudioSource != AUDIO_SOURCE_CNT) {
             err = setupAudioEncoder(writer);
             if (err != OK) return err;
             mTotalBitRate += mAudioBitRate;
         }
 
-        if (mCaptureTimeLapse) {
+        if (mCaptureFpsEnable) {
             mp4writer->setCaptureRate(mCaptureFps);
         }
 
@@ -1734,7 +1753,7 @@
     ALOGV("stop");
     status_t err = OK;
 
-    if (mCaptureTimeLapse && mCameraSourceTimeLapse != NULL) {
+    if (mCaptureFpsEnable && mCameraSourceTimeLapse != NULL) {
         mCameraSourceTimeLapse->startQuickReadReturns();
         mCameraSourceTimeLapse = NULL;
     }
@@ -1809,8 +1828,9 @@
     mMaxFileDurationUs = 0;
     mMaxFileSizeBytes = 0;
     mTrackEveryTimeDurationUs = 0;
-    mCaptureTimeLapse = false;
-    mTimeBetweenTimeLapseFrameCaptureUs = -1;
+    mCaptureFpsEnable = false;
+    mCaptureFps = 0.0f;
+    mTimeBetweenCaptureUs = -1;
     mCameraSourceTimeLapse = NULL;
     mIsMetaDataStoredInVideoBuffers = false;
     mEncoderProfiles = MediaProfiles::getInstance();
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 8af9278..da00bc7 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -111,12 +111,11 @@
     int32_t mStartTimeOffsetMs;
     int32_t mTotalBitRate;
 
-    bool mCaptureTimeLapse;
+    bool mCaptureFpsEnable;
     float mCaptureFps;
-    int64_t mTimeBetweenTimeLapseFrameCaptureUs;
+    int64_t mTimeBetweenCaptureUs;
     sp<CameraSourceTimeLapse> mCameraSourceTimeLapse;
 
-
     String8 mParams;
 
     bool mIsMetaDataStoredInVideoBuffers;
@@ -157,8 +156,8 @@
     status_t setParamAudioNumberOfChannels(int32_t channles);
     status_t setParamAudioSamplingRate(int32_t sampleRate);
     status_t setParamAudioTimeScale(int32_t timeScale);
-    status_t setParamTimeLapseEnable(int32_t timeLapseEnable);
-    status_t setParamTimeLapseFps(float fps);
+    status_t setParamCaptureFpsEnable(int32_t captureFpsEnable);
+    status_t setParamCaptureFps(float fps);
     status_t setParamVideoEncodingBitRate(int32_t bitRate);
     status_t setParamVideoIFramesInterval(int32_t seconds);
     status_t setParamVideoEncoderProfile(int32_t profile);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 13a7d94..767417b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -29,8 +29,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
-
-#include <VideoFrameScheduler.h>
+#include <media/stagefright/VideoFrameScheduler.h>
 
 #include <inttypes.h>
 
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 5210fc8..58ff113 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -138,7 +138,9 @@
 }
 
 void NuPlayer::RTSPSource::resume() {
-    mHandler->resume();
+    if (mHandler != NULL) {
+        mHandler->resume();
+    }
 }
 
 status_t NuPlayer::RTSPSource::feedMoreTSData() {
@@ -295,13 +297,19 @@
     sp<AMessage> msg = new AMessage(kWhatPerformSeek, this);
     msg->setInt32("generation", ++mSeekGeneration);
     msg->setInt64("timeUs", seekTimeUs);
-    msg->post(200000ll);
 
-    return OK;
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+
+    return err;
 }
 
 void NuPlayer::RTSPSource::performSeek(int64_t seekTimeUs) {
     if (mState != CONNECTED) {
+        finishSeek(INVALID_OPERATION);
         return;
     }
 
@@ -320,9 +328,11 @@
     } else if (msg->what() == kWhatPerformSeek) {
         int32_t generation;
         CHECK(msg->findInt32("generation", &generation));
+        CHECK(msg->senderAwaitsResponse(&mSeekReplyID));
 
         if (generation != mSeekGeneration) {
             // obsolete.
+            finishSeek(OK);
             return;
         }
 
@@ -368,6 +378,37 @@
         case MyHandler::kWhatSeekDone:
         {
             mState = CONNECTED;
+            if (mSeekReplyID != NULL) {
+                // Unblock seekTo here in case we attempted to seek in a live stream
+                finishSeek(OK);
+            }
+            break;
+        }
+
+        case MyHandler::kWhatSeekPaused:
+        {
+            sp<AnotherPacketSource> source = getSource(true /* audio */);
+            if (source != NULL) {
+                source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
+                        /* extra */ NULL,
+                        /* discard */ true);
+            }
+            source = getSource(false /* video */);
+            if (source != NULL) {
+                source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
+                        /* extra */ NULL,
+                        /* discard */ true);
+            };
+
+            status_t err = OK;
+            msg->findInt32("err", &err);
+            finishSeek(err);
+
+            if (err == OK) {
+                int64_t timeUs;
+                CHECK(msg->findInt64("time", &timeUs));
+                mHandler->continueSeekAfterPause(timeUs);
+            }
             break;
         }
 
@@ -700,5 +741,12 @@
     return true;
 }
 
+void NuPlayer::RTSPSource::finishSeek(status_t err) {
+    CHECK(mSeekReplyID != NULL);
+    sp<AMessage> seekReply = new AMessage;
+    seekReply->setInt32("err", err);
+    seekReply->postReply(mSeekReplyID);
+    mSeekReplyID = NULL;
+}
 
 }  // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index 5f2cf33..6438a1e 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -116,6 +116,8 @@
     int64_t mEOSTimeoutAudio;
     int64_t mEOSTimeoutVideo;
 
+    sp<AReplyToken> mSeekReplyID;
+
     sp<AnotherPacketSource> getSource(bool audio);
 
     void onConnected();
@@ -131,6 +133,7 @@
     void setError(status_t err);
     void startBufferingIfNecessary();
     bool stopBufferingIfNecessary();
+    void finishSeek(status_t err);
 
     DISALLOW_EVIL_CONSTRUCTORS(RTSPSource);
 };
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 69128bd..b86c749 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -64,6 +64,7 @@
         TimedEventQueue.cpp               \
         Utils.cpp                         \
         VBRISeeker.cpp                    \
+        VideoFrameScheduler.cpp           \
         WAVExtractor.cpp                  \
         WVMExtractor.cpp                  \
         XINGSeeker.cpp                    \
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 34f0148..3505844 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -50,15 +50,19 @@
 }
 
 AudioSource::AudioSource(
-        audio_source_t inputSource, const String16 &opPackageName, uint32_t sampleRate,
-        uint32_t channelCount)
+        audio_source_t inputSource, const String16 &opPackageName,
+        uint32_t sampleRate, uint32_t channelCount, uint32_t outSampleRate)
     : mStarted(false),
       mSampleRate(sampleRate),
+      mOutSampleRate(outSampleRate > 0 ? outSampleRate : sampleRate),
       mPrevSampleTimeUs(0),
+      mFirstSampleTimeUs(-1ll),
       mNumFramesReceived(0),
       mNumClientOwnedBuffers(0) {
-    ALOGV("sampleRate: %d, channelCount: %d", sampleRate, channelCount);
+    ALOGV("sampleRate: %u, outSampleRate: %u, channelCount: %u",
+            sampleRate, outSampleRate, channelCount);
     CHECK(channelCount == 1 || channelCount == 2);
+    CHECK(sampleRate > 0);
 
     size_t minFrameCount;
     status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
@@ -261,6 +265,15 @@
             (int16_t *) buffer->data(), buffer->range_length() >> 1);
     }
 
+    if (mSampleRate != mOutSampleRate) {
+        if (mFirstSampleTimeUs < 0) {
+            mFirstSampleTimeUs = timeUs;
+        }
+        timeUs = mFirstSampleTimeUs + (timeUs - mFirstSampleTimeUs)
+                * (int64_t)mSampleRate / (int64_t)mOutSampleRate;
+        buffer->meta_data()->setInt64(kKeyTime, timeUs);
+    }
+
     *out = buffer;
     return OK;
 }
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c9bd20b..b444687 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -54,10 +54,6 @@
 
 namespace android {
 
-static inline int getCallingPid() {
-    return IPCThreadState::self()->getCallingPid();
-}
-
 static int64_t getId(sp<IResourceManagerClient> client) {
     return (int64_t) client.get();
 }
@@ -108,7 +104,8 @@
     DISALLOW_EVIL_CONSTRUCTORS(ResourceManagerClient);
 };
 
-MediaCodec::ResourceManagerServiceProxy::ResourceManagerServiceProxy() {
+MediaCodec::ResourceManagerServiceProxy::ResourceManagerServiceProxy()
+        : mPid(IPCThreadState::self()->getCallingPid()) {
 }
 
 MediaCodec::ResourceManagerServiceProxy::~ResourceManagerServiceProxy() {
@@ -135,7 +132,6 @@
 }
 
 void MediaCodec::ResourceManagerServiceProxy::addResource(
-        int pid,
         int64_t clientId,
         const sp<IResourceManagerClient> client,
         const Vector<MediaResource> &resources) {
@@ -143,7 +139,7 @@
     if (mService == NULL) {
         return;
     }
-    mService->addResource(pid, clientId, client, resources);
+    mService->addResource(mPid, clientId, client, resources);
 }
 
 void MediaCodec::ResourceManagerServiceProxy::removeResource(int64_t clientId) {
@@ -151,16 +147,16 @@
     if (mService == NULL) {
         return;
     }
-    mService->removeResource(clientId);
+    mService->removeResource(mPid, clientId);
 }
 
 bool MediaCodec::ResourceManagerServiceProxy::reclaimResource(
-        int callingPid, const Vector<MediaResource> &resources) {
+        const Vector<MediaResource> &resources) {
     Mutex::Autolock _l(mLock);
     if (mService == NULL) {
         return false;
     }
-    return mService->reclaimResource(callingPid, resources);
+    return mService->reclaimResource(mPid, resources);
 }
 
 // static
@@ -376,7 +372,7 @@
     for (int i = 0; i <= kMaxRetry; ++i) {
         if (i > 0) {
             // Don't try to reclaim resource for the first time.
-            if (!mResourceManagerService->reclaimResource(getCallingPid(), resources)) {
+            if (!mResourceManagerService->reclaimResource(resources)) {
                 break;
             }
         }
@@ -442,7 +438,7 @@
     for (int i = 0; i <= kMaxRetry; ++i) {
         if (i > 0) {
             // Don't try to reclaim resource for the first time.
-            if (!mResourceManagerService->reclaimResource(getCallingPid(), resources)) {
+            if (!mResourceManagerService->reclaimResource(resources)) {
                 break;
             }
         }
@@ -521,7 +517,7 @@
     Vector<MediaResource> resources;
     resources.push_back(MediaResource(type, subtype, value));
     mResourceManagerService->addResource(
-            getCallingPid(), getId(mResourceManagerClient), mResourceManagerClient, resources);
+            getId(mResourceManagerClient), mResourceManagerClient, resources);
 }
 
 status_t MediaCodec::start() {
@@ -539,7 +535,7 @@
     for (int i = 0; i <= kMaxRetry; ++i) {
         if (i > 0) {
             // Don't try to reclaim resource for the first time.
-            if (!mResourceManagerService->reclaimResource(getCallingPid(), resources)) {
+            if (!mResourceManagerService->reclaimResource(resources)) {
                 break;
             }
             // Recover codec from previous error before retry start.
diff --git a/media/libstagefright/MediaSync.cpp b/media/libstagefright/MediaSync.cpp
index 52077a7..0df3ec9 100644
--- a/media/libstagefright/MediaSync.cpp
+++ b/media/libstagefright/MediaSync.cpp
@@ -25,6 +25,7 @@
 #include <media/AudioTrack.h>
 #include <media/stagefright/MediaClock.h>
 #include <media/stagefright/MediaSync.h>
+#include <media/stagefright/VideoFrameScheduler.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -50,6 +51,8 @@
         mReleaseCondition(),
         mNumOutstandingBuffers(0),
         mUsageFlagsFromOutput(0),
+        mMaxAcquiredBufferCount(1),
+        mReturnPendingInputFrame(false),
         mNativeSampleRateInHz(0),
         mNumFramesWritten(0),
         mHasAudio(false),
@@ -121,6 +124,11 @@
             ALOGE("setSurface: failed to connect (%d)", status);
             return status;
         }
+
+        if (mFrameScheduler == NULL) {
+            mFrameScheduler = new VideoFrameScheduler();
+            mFrameScheduler->init();
+        }
     }
 
     if (mOutput != NULL) {
@@ -209,6 +217,12 @@
         bufferConsumer->setConsumerUsageBits(mUsageFlagsFromOutput);
         *outBufferProducer = bufferProducer;
         mInput = bufferConsumer;
+
+        // set undequeued buffer count
+        int minUndequeuedBuffers;
+        mOutput->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers);
+        mMaxAcquiredBufferCount = minUndequeuedBuffers;
+        bufferConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBufferCount);
     }
     return status;
 }
@@ -232,6 +246,7 @@
         mNextBufferItemMediaUs = -1;
     }
     mPlaybackRate = rate;
+    // TODO: update frame scheduler with this info
     mMediaClock->setPlaybackRate(rate);
     onDrainVideo_l();
 }
@@ -325,13 +340,44 @@
     mInput->setConsumerName(String8(name.c_str()));
 }
 
+void MediaSync::flush() {
+    Mutex::Autolock lock(mMutex);
+    if (mFrameScheduler != NULL) {
+        mFrameScheduler->restart();
+    }
+    while (!mBufferItems.empty()) {
+        BufferItem *bufferItem = &*mBufferItems.begin();
+        returnBufferToInput_l(bufferItem->mGraphicBuffer, bufferItem->mFence);
+        mBufferItems.erase(mBufferItems.begin());
+    }
+    mNextBufferItemMediaUs = -1;
+    mNumFramesWritten = 0;
+    mReturnPendingInputFrame = true;
+    mReleaseCondition.signal();
+    mMediaClock->clearAnchor();
+}
+
 status_t MediaSync::setVideoFrameRateHint(float rate) {
-    // ignored until we add the FrameScheduler
-    return rate >= 0.f ? OK : BAD_VALUE;
+    Mutex::Autolock lock(mMutex);
+    if (rate < 0.f) {
+        return BAD_VALUE;
+    }
+    if (mFrameScheduler != NULL) {
+        mFrameScheduler->init(rate);
+    }
+    return OK;
 }
 
 float MediaSync::getVideoFrameRate() {
-    // we don't know the frame rate
+    Mutex::Autolock lock(mMutex);
+    if (mFrameScheduler != NULL) {
+        float fps = mFrameScheduler->getFrameRate();
+        if (fps > 0.f) {
+            return fps;
+        }
+    }
+
+    // we don't have or know the frame rate
     return -1.f;
 }
 
@@ -470,7 +516,7 @@
         CHECK_EQ(res, (status_t)OK);
         numFramesPlayedAt = nowUs;
         numFramesPlayedAt += 1000LL * mAudioTrack->latency() / 2; /* XXX */
-        //ALOGD("getPosition: %d %lld", numFramesPlayed, numFramesPlayedAt);
+        //ALOGD("getPosition: %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
     }
 
     //can't be negative until 12.4 hrs, test.
@@ -510,18 +556,30 @@
         int64_t itemMediaUs = bufferItem->mTimestamp / 1000;
         int64_t itemRealUs = getRealTime(itemMediaUs, nowUs);
 
-        if (itemRealUs <= nowUs) {
+        // adjust video frame PTS based on vsync
+        itemRealUs = mFrameScheduler->schedule(itemRealUs * 1000) / 1000;
+        int64_t oneVsyncUs = (mFrameScheduler->getVsyncPeriod() / 1000);
+        int64_t twoVsyncsUs = oneVsyncUs * 2;
+
+        // post 2 display refreshes before rendering is due
+        if (itemRealUs <= nowUs + twoVsyncsUs) {
+            ALOGV("adjusting PTS from %lld to %lld",
+                    (long long)bufferItem->mTimestamp / 1000, (long long)itemRealUs);
+            bufferItem->mTimestamp = itemRealUs * 1000;
+            bufferItem->mIsAutoTimestamp = false;
+
             if (mHasAudio) {
                 if (nowUs - itemRealUs <= kMaxAllowedVideoLateTimeUs) {
-                    renderOneBufferItem_l(*bufferItem);
+                    renderOneBufferItem_l(*bufferItem, nowUs + oneVsyncUs - itemRealUs);
                 } else {
                     // too late.
                     returnBufferToInput_l(
                             bufferItem->mGraphicBuffer, bufferItem->mFence);
+                    mFrameScheduler->restart();
                 }
             } else {
                 // always render video buffer in video-only mode.
-                renderOneBufferItem_l(*bufferItem);
+                renderOneBufferItem_l(*bufferItem, nowUs + oneVsyncUs - itemRealUs);
 
                 // smooth out videos >= 10fps
                 mMediaClock->updateAnchor(
@@ -534,7 +592,7 @@
             if (mNextBufferItemMediaUs == -1
                     || mNextBufferItemMediaUs > itemMediaUs) {
                 sp<AMessage> msg = new AMessage(kWhatDrainVideo, this);
-                msg->post(itemRealUs - nowUs);
+                msg->post(itemRealUs - nowUs - twoVsyncsUs);
                 mNextBufferItemMediaUs = itemMediaUs;
             }
             break;
@@ -545,10 +603,18 @@
 void MediaSync::onFrameAvailableFromInput() {
     Mutex::Autolock lock(mMutex);
 
+    const static nsecs_t kAcquireWaitTimeout = 2000000000; // 2 seconds
+
+    mReturnPendingInputFrame = false;
+
     // If there are too many outstanding buffers, wait until a buffer is
     // released back to the input in onBufferReleased.
-    while (mNumOutstandingBuffers >= MAX_OUTSTANDING_BUFFERS) {
-        mReleaseCondition.wait(mMutex);
+    // NOTE: BufferQueue allows dequeuing maxAcquiredBufferCount + 1 buffers
+    while (mNumOutstandingBuffers > mMaxAcquiredBufferCount
+            && !mIsAbandoned && !mReturnPendingInputFrame) {
+        if (mReleaseCondition.waitRelative(mMutex, kAcquireWaitTimeout) != OK) {
+            ALOGI("still waiting to release a buffer before acquire");
+        }
 
         // If the sync is abandoned while we are waiting, the release
         // condition variable will be broadcast, and we should just return
@@ -582,12 +648,21 @@
 
     if (mBuffersFromInput.indexOfKey(bufferItem.mGraphicBuffer->getId()) >= 0) {
         // Something is wrong since this buffer should be at our hands, bail.
+        ALOGE("received buffer multiple times from input");
         mInput->consumerDisconnect();
         onAbandoned_l(true /* isInput */);
         return;
     }
     mBuffersFromInput.add(bufferItem.mGraphicBuffer->getId(), bufferItem.mGraphicBuffer);
 
+    // If flush happened while waiting for a buffer to be released, simply return it
+    // TRICKY: do it here after it is detached so that we don't have to cache mGraphicBuffer.
+    if (mReturnPendingInputFrame) {
+        mReturnPendingInputFrame = false;
+        returnBufferToInput_l(bufferItem.mGraphicBuffer, bufferItem.mFence);
+        return;
+    }
+
     mBufferItems.push_back(bufferItem);
 
     if (mBufferItems.size() == 1) {
@@ -595,7 +670,7 @@
     }
 }
 
-void MediaSync::renderOneBufferItem_l( const BufferItem &bufferItem) {
+void MediaSync::renderOneBufferItem_l(const BufferItem &bufferItem, int64_t checkInUs) {
     IGraphicBufferProducer::QueueBufferInput queueInput(
             bufferItem.mTimestamp,
             bufferItem.mIsAutoTimestamp,
@@ -635,6 +710,12 @@
     mBuffersSentToOutput.add(bufferItem.mGraphicBuffer->getId(), bufferItem.mGraphicBuffer);
 
     ALOGV("queued buffer %#llx to output", (long long)bufferItem.mGraphicBuffer->getId());
+
+    // If we have already queued more than one buffer, check for any free buffers in case
+    // one of them was dropped - as BQ does not signal onBufferReleased in that case.
+    if (mBuffersSentToOutput.size() > 1) {
+        (new AMessage(kWhatCheckFrameAvailable, this))->post(checkInUs);
+    }
 }
 
 void MediaSync::onBufferReleasedByOutput(sp<IGraphicBufferProducer> &output) {
@@ -646,32 +727,38 @@
 
     sp<GraphicBuffer> buffer;
     sp<Fence> fence;
-    status_t status = mOutput->detachNextBuffer(&buffer, &fence);
-    ALOGE_IF(status != NO_ERROR, "detaching buffer from output failed (%d)", status);
+    status_t status;
+    // NOTE: This is a workaround for a BufferQueue bug where onBufferReleased is
+    // called only for released buffers, but not for buffers that were dropped during
+    // acquire. Dropped buffers can still be detached as they are on the free list.
+    // TODO: remove if released callback happens also for dropped buffers
+    while ((status = mOutput->detachNextBuffer(&buffer, &fence)) != NO_MEMORY) {
+        ALOGE_IF(status != NO_ERROR, "detaching buffer from output failed (%d)", status);
 
-    if (status == NO_INIT) {
-        // If the output has been abandoned, we can't do anything else,
-        // since buffer is invalid.
-        onAbandoned_l(false /* isInput */);
-        return;
+        if (status == NO_INIT) {
+            // If the output has been abandoned, we can't do anything else,
+            // since buffer is invalid.
+            onAbandoned_l(false /* isInput */);
+            return;
+        }
+
+        ALOGV("detached buffer %#llx from output", (long long)buffer->getId());
+
+        // If we've been abandoned, we can't return the buffer to the input, so just
+        // move on.
+        if (mIsAbandoned) {
+            return;
+        }
+
+        ssize_t ix = mBuffersSentToOutput.indexOfKey(buffer->getId());
+        if (ix < 0) {
+            // The buffer is unknown, maybe leftover, ignore.
+            return;
+        }
+        mBuffersSentToOutput.removeItemsAt(ix);
+
+        returnBufferToInput_l(buffer, fence);
     }
-
-    ALOGV("detached buffer %#llx from output", (long long)buffer->getId());
-
-    // If we've been abandoned, we can't return the buffer to the input, so just
-    // move on.
-    if (mIsAbandoned) {
-        return;
-    }
-
-    ssize_t ix = mBuffersSentToOutput.indexOfKey(buffer->getId());
-    if (ix < 0) {
-        // The buffer is unknown, maybe leftover, ignore.
-        return;
-    }
-    mBuffersSentToOutput.removeItemsAt(ix);
-
-    returnBufferToInput_l(buffer, fence);
 }
 
 void MediaSync::returnBufferToInput_l(
@@ -679,6 +766,7 @@
     ssize_t ix = mBuffersFromInput.indexOfKey(buffer->getId());
     if (ix < 0) {
         // The buffer is unknown, something is wrong, bail.
+        ALOGE("output returned unknown buffer");
         mOutput->disconnect(NATIVE_WINDOW_API_MEDIA);
         onAbandoned_l(false /* isInput */);
         return;
@@ -741,6 +829,12 @@
             break;
         }
 
+        case kWhatCheckFrameAvailable:
+        {
+            onBufferReleasedByOutput(mOutput);
+            break;
+        }
+
         default:
             TRESPASS();
             break;
diff --git a/media/libmediaplayerservice/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
similarity index 96%
rename from media/libmediaplayerservice/VideoFrameScheduler.cpp
rename to media/libstagefright/VideoFrameScheduler.cpp
index ce5f5fe..5fe9bf9 100644
--- a/media/libmediaplayerservice/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -28,8 +28,7 @@
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AUtils.h>
-
-#include "VideoFrameScheduler.h"
+#include <media/stagefright/VideoFrameScheduler.h>
 
 namespace android {
 
@@ -56,7 +55,7 @@
 static const size_t kMaxSamplesToEstimatePeriod = VideoFrameScheduler::kHistorySize;
 
 static const size_t kPrecision = 12;
-static const size_t kErrorThreshold = (1 << (kPrecision * 2)) / 10;
+static const int64_t kErrorThreshold = (1 << (kPrecision * 2)) / 10;
 static const int64_t kMultiplesThresholdDiv = 4;            // 25%
 static const int64_t kReFitThresholdDiv = 100;              // 1%
 static const nsecs_t kMaxAllowedFrameSkip = kNanosIn1s;     // 1 sec
@@ -258,7 +257,8 @@
             mPhase = firstTime;
         }
     }
-    ALOGV("priming[%zu] phase:%lld period:%lld", numSamplesToUse, mPhase, mPeriod);
+    ALOGV("priming[%zu] phase:%lld period:%lld",
+            numSamplesToUse, (long long)mPhase, (long long)mPeriod);
 }
 
 nsecs_t VideoFrameScheduler::PLL::addSample(nsecs_t time) {
@@ -316,6 +316,10 @@
     return mPeriod;
 }
 
+nsecs_t VideoFrameScheduler::PLL::getPeriod() const {
+    return mPrimed ? mPeriod : 0;
+}
+
 /* ======================================================================= */
 /*                             Frame Scheduler                             */
 /* ======================================================================= */
@@ -382,6 +386,14 @@
     return kDefaultVsyncPeriod;
 }
 
+float VideoFrameScheduler::getFrameRate() {
+    nsecs_t videoPeriod = mPll.getPeriod();
+    if (videoPeriod > 0) {
+        return 1e9 / videoPeriod;
+    }
+    return 0.f;
+}
+
 nsecs_t VideoFrameScheduler::schedule(nsecs_t renderTime) {
     nsecs_t origRenderTime = renderTime;
 
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
index 5c05a0e..1db350f 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
@@ -82,7 +82,10 @@
     initPorts(
             kNumBuffers, max(kMaxOutputBufferSize / kMinCompressionRatio, (size_t)INPUT_BUF_SIZE),
             kNumBuffers, CODEC_MIME_TYPE, kMinCompressionRatio);
-    CHECK_EQ(initDecoder(), (status_t)OK);
+}
+
+status_t SoftHEVC::init() {
+    return initDecoder();
 }
 
 SoftHEVC::~SoftHEVC() {
@@ -766,5 +769,10 @@
 android::SoftOMXComponent *createSoftOMXComponent(const char *name,
         const OMX_CALLBACKTYPE *callbacks, OMX_PTR appData,
         OMX_COMPONENTTYPE **component) {
-    return new android::SoftHEVC(name, callbacks, appData, component);
+    android::SoftHEVC *codec = new android::SoftHEVC(name, callbacks, appData, component);
+    if (codec->init() != android::OK) {
+        android::sp<android::SoftOMXComponent> release = codec;
+        return NULL;
+    }
+    return codec;
 }
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.h b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
index a91f528..c6344cf 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.h
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
@@ -56,6 +56,8 @@
     SoftHEVC(const char *name, const OMX_CALLBACKTYPE *callbacks,
             OMX_PTR appData, OMX_COMPONENTTYPE **component);
 
+    status_t init();
+
 protected:
     virtual ~SoftHEVC();
 
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index e64a7a1..0d0baf3 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -98,6 +98,7 @@
     enum {
         kWhatConnected                  = 'conn',
         kWhatDisconnected               = 'disc',
+        kWhatSeekPaused                 = 'spau',
         kWhatSeekDone                   = 'sdon',
 
         kWhatAccessUnit                 = 'accU',
@@ -220,6 +221,12 @@
         msg->post();
     }
 
+    void continueSeekAfterPause(int64_t timeUs) {
+        sp<AMessage> msg = new AMessage('see1', this);
+        msg->setInt64("time", timeUs);
+        msg->post();
+    }
+
     bool isSeekable() const {
         return mSeekable;
     }
@@ -1180,7 +1187,7 @@
                 mCheckPending = true;
                 ++mCheckGeneration;
 
-                sp<AMessage> reply = new AMessage('see1', this);
+                sp<AMessage> reply = new AMessage('see0', this);
                 reply->setInt64("time", timeUs);
 
                 if (mPausing) {
@@ -1203,9 +1210,26 @@
                 break;
             }
 
-            case 'see1':
+            case 'see0':
             {
                 // Session is paused now.
+                status_t err = OK;
+                msg->findInt32("result", &err);
+
+                int64_t timeUs;
+                CHECK(msg->findInt64("time", &timeUs));
+
+                sp<AMessage> notify = mNotify->dup();
+                notify->setInt32("what", kWhatSeekPaused);
+                notify->setInt32("err", err);
+                notify->setInt64("time", timeUs);
+                notify->post();
+                break;
+
+            }
+
+            case 'see1':
+            {
                 for (size_t i = 0; i < mTracks.size(); ++i) {
                     TrackInfo *info = &mTracks.editItemAt(i);
 
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index f953cc8..b6d1be7 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -143,9 +143,19 @@
     return status;
 }
 
-size_t AudioStreamOut::getFrameSize()
+audio_format_t AudioStreamOut::getFormat() const
 {
-    return mHalFrameSize;
+    return stream->common.get_format(&stream->common);
+}
+
+uint32_t AudioStreamOut::getSampleRate() const
+{
+    return stream->common.get_sample_rate(&stream->common);
+}
+
+audio_channel_mask_t AudioStreamOut::getChannelMask() const
+{
+    return stream->common.get_channels(&stream->common);
 }
 
 int AudioStreamOut::flush()
@@ -165,7 +175,6 @@
     ALOG_ASSERT(stream != NULL);
     mRenderPosition = 0;
     mFramesWrittenAtStandby = mFramesWritten;
-    ALOGI("AudioStreamOut::standby(), mFramesWrittenAtStandby = %llu", mFramesWrittenAtStandby);
     return stream->common.standby(&stream->common);
 }
 
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
index 761e771..06a2277 100644
--- a/services/audioflinger/AudioStreamOut.h
+++ b/services/audioflinger/AudioStreamOut.h
@@ -75,7 +75,28 @@
     */
     virtual ssize_t write(const void *buffer, size_t bytes);
 
-    virtual size_t getFrameSize();
+    /**
+     * @return frame size from the perspective of the application and the AudioFlinger.
+     */
+    virtual size_t getFrameSize() const { return mHalFrameSize; }
+
+    /**
+     * @return format from the perspective of the application and the AudioFlinger.
+     */
+    virtual audio_format_t getFormat() const;
+
+    /**
+     * The HAL may be running at a higher sample rate if, for example, playing wrapped EAC3.
+     * @return sample rate from the perspective of the application and the AudioFlinger.
+     */
+    virtual uint32_t getSampleRate() const;
+
+    /**
+     * The HAL is in stereo mode when playing multi-channel compressed audio over HDMI.
+     * @return channel mask from the perspective of the application and the AudioFlinger.
+     */
+    virtual audio_channel_mask_t getChannelMask() const;
+
 
     virtual status_t flush();
     virtual status_t standby();
diff --git a/services/audioflinger/SpdifStreamOut.cpp b/services/audioflinger/SpdifStreamOut.cpp
index 6af7bce..6b6f5db 100644
--- a/services/audioflinger/SpdifStreamOut.cpp
+++ b/services/audioflinger/SpdifStreamOut.cpp
@@ -37,6 +37,9 @@
             audio_format_t format)
         : AudioStreamOut(dev,flags)
         , mSpdifEncoder(this, format)
+        , mApplicationFormat(AUDIO_FORMAT_DEFAULT)
+        , mApplicationSampleRate(0)
+        , mApplicationChannelMask(0)
 {
 }
 
@@ -48,6 +51,10 @@
 {
     struct audio_config customConfig = *config;
 
+    mApplicationFormat = config->format;
+    mApplicationSampleRate = config->sample_rate;
+    mApplicationChannelMask = config->channel_mask;
+
     // Some data bursts run at a higher sample rate.
     // TODO Move this into the audio_utils as a static method.
     switch(config->format) {
@@ -106,20 +113,15 @@
     return AudioStreamOut::standby();
 }
 
-size_t SpdifStreamOut::getFrameSize()
-{
-    return sizeof(int8_t);
-}
-
 ssize_t SpdifStreamOut::writeDataBurst(const void* buffer, size_t bytes)
 {
     return AudioStreamOut::write(buffer, bytes);
 }
 
-ssize_t SpdifStreamOut::write(const void* buffer, size_t bytes)
+ssize_t SpdifStreamOut::write(const void* buffer, size_t numBytes)
 {
     // Write to SPDIF wrapper. It will call back to writeDataBurst().
-    return mSpdifEncoder.write(buffer, bytes);
+    return mSpdifEncoder.write(buffer, numBytes);
 }
 
 } // namespace android
diff --git a/services/audioflinger/SpdifStreamOut.h b/services/audioflinger/SpdifStreamOut.h
index a61a7bd..c870250 100644
--- a/services/audioflinger/SpdifStreamOut.h
+++ b/services/audioflinger/SpdifStreamOut.h
@@ -64,7 +64,27 @@
     */
     virtual ssize_t write(const void* buffer, size_t bytes);
 
-    virtual size_t getFrameSize();
+    /**
+     * @return frame size from the perspective of the application and the AudioFlinger.
+     */
+    virtual size_t getFrameSize() const { return sizeof(int8_t); }
+
+    /**
+     * @return format from the perspective of the application and the AudioFlinger.
+     */
+    virtual audio_format_t getFormat() const { return mApplicationFormat; }
+
+    /**
+     * The HAL may be running at a higher sample rate if, for example, playing wrapped EAC3.
+     * @return sample rate from the perspective of the application and the AudioFlinger.
+     */
+    virtual uint32_t getSampleRate() const { return mApplicationSampleRate; }
+
+    /**
+     * The HAL is in stereo mode when playing multi-channel compressed audio over HDMI.
+     * @return channel mask from the perspective of the application and the AudioFlinger.
+     */
+    virtual audio_channel_mask_t getChannelMask() const { return mApplicationChannelMask; }
 
     virtual status_t flush();
     virtual status_t standby();
@@ -89,6 +109,9 @@
     };
 
     MySPDIFEncoder       mSpdifEncoder;
+    audio_format_t       mApplicationFormat;
+    uint32_t             mApplicationSampleRate;
+    audio_channel_mask_t mApplicationChannelMask;
 
     ssize_t  writeDataBurst(const void* data, size_t bytes);
     ssize_t  writeInternal(const void* buffer, size_t bytes);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index d9f1a83..c360051 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2081,8 +2081,8 @@
 void AudioFlinger::PlaybackThread::readOutputParameters_l()
 {
     // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
-    mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
-    mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common);
+    mSampleRate = mOutput->getSampleRate();
+    mChannelMask = mOutput->getChannelMask();
     if (!audio_is_output_channel(mChannelMask)) {
         LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
     }
@@ -2092,8 +2092,12 @@
                 mChannelMask);
     }
     mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
+
+    // Get actual HAL format.
     mHALFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
-    mFormat = mHALFormat;
+    // Get format from the shim, which will be different than the HAL format
+    // if playing compressed audio over HDMI passthrough.
+    mFormat = mOutput->getFormat();
     if (!audio_is_valid_format(mFormat)) {
         LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
     }
@@ -4559,9 +4563,10 @@
         // app does not call stop() and relies on underrun to stop:
         // hence the test on (track->mRetryCount > 1).
         // If retryCount<=1 then track is about to underrun and be removed.
+        // Do not use a high threshold for compressed audio.
         uint32_t minFrames;
         if ((track->sharedBuffer() == 0) && !track->isStopping_1() && !track->isPausing()
-            && (track->mRetryCount > 1)) {
+            && (track->mRetryCount > 1) && audio_is_linear_pcm(mFormat)) {
             minFrames = mNormalFrameCount;
         } else {
             minFrames = 1;
@@ -4650,6 +4655,9 @@
                     // it will then automatically call start() when data is available
                     android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
                 } else if (last) {
+                    ALOGW("pause because of UNDERRUN, framesReady = %zu, "
+                            "minFrames = %u, mFormat = %#x",
+                            track->framesReady(), minFrames, mFormat);
                     mixerStatus = MIXER_TRACKS_ENABLED;
                     if (mHwSupportsPause && !mHwPaused && !mStandby) {
                         doHwPause = true;
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
index 46b2725..a523656 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
@@ -26,13 +26,8 @@
 LOCAL_SHARED_LIBRARIES := \
     libaudiopolicyengineconfigurable  \
     libparameter \
-    libicuuc \
-    liblog \
-
-LOCAL_STATIC_LIBRARIES := \
     libxmlserializer \
-    libpfw_utility \
-    libxml2 \
+    liblog \
 
 LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE := libpolicy-subsystem
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 8e88ca7..36e99dd 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1973,6 +1973,14 @@
         return res;
     }
 
+    // Ideally we don't need this, but current camera device
+    // status tracking mechanism demands it.
+    res = mDevice->waitUntilDrained();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Waiting device drain failed: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+    }
+
     res = updateProcessorStream(mJpegProcessor, params);
     return res;
 }
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 699de2f..442eb75 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -2865,7 +2865,8 @@
             }
         }
     }
-    ALOGE("%s: cannot find jpeg size %dx%d", size.width, size.height);
+    ALOGE("%s: cannot find min frame duration for jpeg size %dx%d",
+            __FUNCTION__, size.width, size.height);
     return -1;
 }
 
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 3b83f63..c717a56 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -719,6 +719,38 @@
     return res;
 }
 
+status_t CameraDeviceClient::tearDown(int streamId) {
+    ATRACE_CALL();
+    ALOGV("%s", __FUNCTION__);
+
+    status_t res = OK;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    // Guard against trying to tear down non-created streams
+    ssize_t index = NAME_NOT_FOUND;
+    for (size_t i = 0; i < mStreamMap.size(); ++i) {
+        if (streamId == mStreamMap.valueAt(i)) {
+            index = i;
+            break;
+        }
+    }
+
+    if (index == NAME_NOT_FOUND) {
+        ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
+              "created yet", __FUNCTION__, mCameraId, streamId);
+        return BAD_VALUE;
+    }
+
+    // Also returns BAD_VALUE if stream ID was not valid or if the stream is in
+    // use
+    res = mDevice->tearDown(streamId);
+
+    return res;
+}
+
+
 status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) {
     String8 result;
     result.appendFormat("CameraDeviceClient[%d] (%p) dump:\n",
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 0f485ca..1f8b39d 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -111,6 +111,9 @@
     // Prepare stream by preallocating its buffers
     virtual status_t      prepare(int streamId);
 
+    // Tear down stream resources by freeing its unused buffers
+    virtual status_t      tearDown(int streamId);
+
     /**
      * Interface used by CameraService
      */
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 06177e3..cd25949 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -289,6 +289,11 @@
     virtual status_t prepare(int streamId) = 0;
 
     /**
+     * Free stream resources by dumping its unused gralloc buffers.
+     */
+    virtual status_t tearDown(int streamId) = 0;
+
+    /**
      * Get the HAL device version.
      */
     virtual uint32_t getDeviceVersion() = 0;
diff --git a/services/camera/libcameraservice/common/CameraModule.cpp b/services/camera/libcameraservice/common/CameraModule.cpp
index 1ae01ae..6a4dfe0 100644
--- a/services/camera/libcameraservice/common/CameraModule.cpp
+++ b/services/camera/libcameraservice/common/CameraModule.cpp
@@ -136,9 +136,10 @@
     // Always add a default for the pre-correction active array if the vendor chooses to omit this
     camera_metadata_entry entry = chars.find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
     if (entry.count == 0) {
+        Vector<int32_t> preCorrectionArray;
         entry = chars.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
-        chars.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, entry.data.i32,
-                entry.count);
+        preCorrectionArray.appendArray(entry.data.i32, entry.count);
+        chars.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, preCorrectionArray);
     }
 
     return;
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index dfe5565..c9c990c 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -626,6 +626,12 @@
     return NO_INIT;
 }
 
+status_t Camera2Device::tearDown(int streamId) {
+    ATRACE_CALL();
+    ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId);
+    return NO_INIT;
+}
+
 uint32_t Camera2Device::getDeviceVersion() {
     ATRACE_CALL();
     return mDeviceVersion;
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index c9f3a2c..34c1ded 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -85,8 +85,9 @@
             buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
     // Flush implemented as just a wait
     virtual status_t flush(int64_t *lastFrameNumber = NULL);
-    // Prepare is a no-op
+    // Prepare and tearDown are no-ops
     virtual status_t prepare(int streamId);
+    virtual status_t tearDown(int streamId);
 
     virtual uint32_t getDeviceVersion();
     virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 9e73b5c..3afbd89 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1384,6 +1384,37 @@
     return mPreparerThread->prepare(stream);
 }
 
+status_t Camera3Device::tearDown(int streamId) {
+    ATRACE_CALL();
+    ALOGV("%s: Camera %d: Tearing down stream %d", __FUNCTION__, mId, streamId);
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    // Teardown can only be accomplished on devices that don't require register_stream_buffers,
+    // since we cannot call register_stream_buffers except right after configure_streams.
+    if (mHal3Device->common.version < CAMERA_DEVICE_API_VERSION_3_2) {
+        ALOGE("%s: Unable to tear down streams on device HAL v%x",
+                __FUNCTION__, mHal3Device->common.version);
+        return NO_INIT;
+    }
+
+    sp<Camera3StreamInterface> stream;
+    ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId);
+    if (outputStreamIdx == NAME_NOT_FOUND) {
+        CLOGE("Stream %d does not exist", streamId);
+        return BAD_VALUE;
+    }
+
+    stream = mOutputStreams.editValueAt(outputStreamIdx);
+
+    if (stream->hasOutstandingBuffers() || mRequestThread->isStreamPending(stream)) {
+        CLOGE("Stream %d is a target of an in-progress request", streamId);
+        return BAD_VALUE;
+    }
+
+    return stream->tearDown();
+}
+
 uint32_t Camera3Device::getDeviceVersion() {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 31b6132..140da98 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -141,6 +141,8 @@
 
     virtual status_t prepare(int streamId);
 
+    virtual status_t tearDown(int streamId);
+
     virtual uint32_t getDeviceVersion();
 
     virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 4c40bb6..2527fd6 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -364,6 +364,61 @@
     return res;
 }
 
+/**
+ * Free all unused gralloc buffers for this stream by disconnecting from and
+ * reconnecting to the buffer queue, leaving the stream in the CONFIGURED
+ * state and ready for use afterward.
+ *
+ * Returns:
+ *   OK on success (including the queue already being disconnected),
+ *   INVALID_OPERATION if the stream is not CONFIGURED or has buffers
+ *     currently handed out to the HAL,
+ *   the error from disconnectLocked()/configureQueueLocked() otherwise.
+ */
+status_t Camera3Stream::tearDown() {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+
+    status_t res = OK;
+
+    // This function should be only called when the stream is configured.
+    if (mState != STATE_CONFIGURED) {
+        ALOGE("%s: Stream %d: Can't tear down stream if stream is not in "
+                "CONFIGURED state %d", __FUNCTION__, mId, mState);
+        return INVALID_OPERATION;
+    }
+
+    // If any buffers have been handed to the HAL, the stream cannot be torn down.
+    if (getHandoutOutputBufferCountLocked() > 0) {
+        ALOGE("%s: Stream %d: Can't tear down a stream that has outstanding buffers",
+                __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    // Free buffers by disconnecting and then reconnecting to the buffer queue
+    // Only unused buffers will be dropped immediately; buffers that have been filled
+    // and are waiting to be acquired by the consumer and buffers that are currently
+    // acquired will be freed once they are released by the consumer.
+
+    res = disconnectLocked();
+    if (res != OK) {
+        if (res == -ENOTCONN) {
+            // queue has been disconnected, nothing left to do, so exit with success
+            return OK;
+        }
+        ALOGE("%s: Stream %d: Unable to disconnect to tear down buffers: %s (%d)",
+                __FUNCTION__, mId, strerror(-res), res);
+        return res;
+    }
+
+    // Mirror the initial configuration path: the stream is IN_CONFIG while the
+    // queue connection is being (re-)established.
+    mState = STATE_IN_CONFIG;
+
+    res = configureQueueLocked();
+    if (res != OK) {
+        ALOGE("%s: Unable to configure stream %d queue: %s (%d)",
+                __FUNCTION__, mId, strerror(-res), res);
+        // Reconnection failed; the stream is unusable until reconfigured.
+        mState = STATE_ERROR;
+        return res;
+    }
+
+    // Reset prepared state, since we've reconnected to the queue and can prepare again.
+    mPrepared = false;
+    mStreamUnpreparable = false;
+
+    mState = STATE_CONFIGURED;
+
+    return OK;
+}
+
 status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) {
     ATRACE_CALL();
     Mutex::Autolock l(mLock);
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 0543c66..bab2177 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -247,6 +247,20 @@
     status_t        cancelPrepare();
 
     /**
+     * Tear down memory for this stream. This frees all unused gralloc buffers
+     * allocated for this stream, but leaves it ready for operation afterward.
+     *
+     * May only be called in the CONFIGURED state, and keeps the stream in
+     * the CONFIGURED state.
+     *
+     * Returns:
+     *    OK if teardown succeeded.
+     *    INVALID_OPERATION if not in the CONFIGURED state
+     *    NO_INIT in case of a serious error from the HAL device
+     */
+    status_t       tearDown();
+
+    /**
      * Fill in the camera3_stream_buffer with the next valid buffer for this
      * stream, to hand over to the HAL.
      *
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 6c87a45..c086eaf 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -152,6 +152,20 @@
     virtual status_t cancelPrepare() = 0;
 
     /**
+     * Tear down memory for this stream. This frees all unused gralloc buffers
+     * allocated for this stream, but leaves it ready for operation afterward.
+     *
+     * May only be called in the CONFIGURED state, and keeps the stream in
+     * the CONFIGURED state.
+     *
+     * Returns:
+     *    OK if teardown succeeded.
+     *    INVALID_OPERATION if not in the CONFIGURED state
+     *    NO_INIT in case of a serious error from the HAL device
+     */
+    virtual status_t tearDown() = 0;
+
+    /**
      * Fill in the camera3_stream_buffer with the next valid buffer for this
      * stream, to hand over to the HAL.
      *
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 61147ff..e54cc5a 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -179,23 +179,24 @@
     info.resources.appendVector(resources);
 }
 
-void ResourceManagerService::removeResource(int64_t clientId) {
-    String8 log = String8::format("removeResource(%lld)", (long long) clientId);
+void ResourceManagerService::removeResource(int pid, int64_t clientId) {
+    String8 log = String8::format(
+            "removeResource(pid %d, clientId %lld)",
+            pid, (long long) clientId);
     mServiceLog->add(log);
 
     Mutex::Autolock lock(mLock);
+    ssize_t index = mMap.indexOfKey(pid);
+    if (index < 0) {
+        ALOGV("removeResource: didn't find pid %d for clientId %lld", pid, (long long) clientId);
+        return;
+    }
     bool found = false;
-    for (size_t i = 0; i < mMap.size(); ++i) {
-        ResourceInfos &infos = mMap.editValueAt(i);
-        for (size_t j = 0; j < infos.size();) {
-            if (infos[j].clientId == clientId) {
-                j = infos.removeAt(j);
-                found = true;
-            } else {
-                ++j;
-            }
-        }
-        if (found) {
+    ResourceInfos &infos = mMap.editValueAt(index);
+    for (size_t j = 0; j < infos.size(); ++j) {
+        if (infos[j].clientId == clientId) {
+            j = infos.removeAt(j);
+            found = true;
             break;
         }
     }
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index ca218fc..4769373 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -63,7 +63,7 @@
             const sp<IResourceManagerClient> client,
             const Vector<MediaResource> &resources);
 
-    virtual void removeResource(int64_t clientId);
+    virtual void removeResource(int pid, int64_t clientId);
 
     // Tries to reclaim resource from processes with lower priority than the calling process
     // according to the requested resources.
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
index 8ae6a55..df49ddc 100644
--- a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -29,6 +29,10 @@
 
 namespace android {
 
+// Derive the test client id from the binder object's pointer value.
+// Takes the strong pointer by const reference: passing sp<> by value costs
+// an atomic refcount increment/decrement per call for no ownership benefit.
+static int64_t getId(const sp<IResourceManagerClient>& client) {
+    return (int64_t) client.get();
+}
+
 struct TestProcessInfo : public ProcessInfoInterface {
     TestProcessInfo() {}
     virtual ~TestProcessInfo() {}
@@ -45,12 +49,12 @@
 };
 
 struct TestClient : public BnResourceManagerClient {
-    TestClient(sp<ResourceManagerService> service)
-        : mReclaimed(false), mService(service) {}
+    TestClient(int pid, sp<ResourceManagerService> service)
+        : mReclaimed(false), mPid(pid), mService(service) {}
 
     virtual bool reclaimResource() {
         sp<IResourceManagerClient> client(this);
-        mService->removeResource((int64_t) client.get());
+        mService->removeResource(mPid, (int64_t) client.get());
         mReclaimed = true;
         return true;
     }
@@ -72,6 +76,7 @@
 
 private:
     bool mReclaimed;
+    int mPid;
     sp<ResourceManagerService> mService;
     DISALLOW_EVIL_CONSTRUCTORS(TestClient);
 };
@@ -87,9 +92,9 @@
 public:
     ResourceManagerServiceTest()
         : mService(new ResourceManagerService(new TestProcessInfo)),
-          mTestClient1(new TestClient(mService)),
-          mTestClient2(new TestClient(mService)),
-          mTestClient3(new TestClient(mService)) {
+          mTestClient1(new TestClient(kTestPid1, mService)),
+          mTestClient2(new TestClient(kTestPid2, mService)),
+          mTestClient3(new TestClient(kTestPid2, mService)) {
     }
 
 protected:
@@ -144,24 +149,24 @@
         // kTestPid1 mTestClient1
         Vector<MediaResource> resources1;
         resources1.push_back(MediaResource(String8(kResourceSecureCodec), 1));
-        mService->addResource(kTestPid1, (int64_t) mTestClient1.get(), mTestClient1, resources1);
+        mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources1);
         resources1.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
         Vector<MediaResource> resources11;
         resources11.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
-        mService->addResource(kTestPid1, (int64_t) mTestClient1.get(), mTestClient1, resources11);
+        mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources11);
 
         // kTestPid2 mTestClient2
         Vector<MediaResource> resources2;
         resources2.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
         resources2.push_back(MediaResource(String8(kResourceGraphicMemory), 300));
-        mService->addResource(kTestPid2, (int64_t) mTestClient2.get(), mTestClient2, resources2);
+        mService->addResource(kTestPid2, getId(mTestClient2), mTestClient2, resources2);
 
         // kTestPid2 mTestClient3
         Vector<MediaResource> resources3;
-        mService->addResource(kTestPid2, (int64_t) mTestClient3.get(), mTestClient3, resources3);
+        mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
         resources3.push_back(MediaResource(String8(kResourceSecureCodec), 1));
         resources3.push_back(MediaResource(String8(kResourceGraphicMemory), 100));
-        mService->addResource(kTestPid2, (int64_t) mTestClient3.get(), mTestClient3, resources3);
+        mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
 
         const PidResourceInfosMap &map = mService->mMap;
         EXPECT_EQ(2u, map.size());
@@ -213,7 +218,7 @@
     void testRemoveResource() {
         addResource();
 
-        mService->removeResource((int64_t) mTestClient2.get());
+        mService->removeResource(kTestPid2, getId(mTestClient2));
 
         const PidResourceInfosMap &map = mService->mMap;
         EXPECT_EQ(2u, map.size());
@@ -431,7 +436,7 @@
             verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
 
             // clean up client 3 which still left
-            mService->removeResource((int64_t) mTestClient3.get());
+            mService->removeResource(kTestPid2, getId(mTestClient3));
         }
     }