Merge "[WVDRM] Fix for stuttering on low bandwidth" into jb-mr1-dev
diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp
index d10f2e5..fd91bf2 100644
--- a/camera/CameraParameters.cpp
+++ b/camera/CameraParameters.cpp
@@ -90,7 +90,6 @@
 const char CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED[] = "video-snapshot-supported";
 const char CameraParameters::KEY_VIDEO_STABILIZATION[] = "video-stabilization";
 const char CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED[] = "video-stabilization-supported";
-const char CameraParameters::KEY_LIGHTFX[] = "light-fx";
 
 const char CameraParameters::TRUE[] = "true";
 const char CameraParameters::FALSE[] = "false";
@@ -168,10 +167,6 @@
 const char CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO[] = "continuous-video";
 const char CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE[] = "continuous-picture";
 
-// Values for light fx settings
-const char CameraParameters::LIGHTFX_LOWLIGHT[] = "low-light";
-const char CameraParameters::LIGHTFX_HDR[] = "high-dynamic-range";
-
 CameraParameters::CameraParameters()
                 : mMap()
 {
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index d521543..5540d32 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -525,10 +525,6 @@
     // stream and record stabilized videos.
     static const char KEY_VIDEO_STABILIZATION_SUPPORTED[];
 
-    // Supported modes for special effects with light.
-    // Example values: "lowlight,hdr".
-    static const char KEY_LIGHTFX[];
-
     // Value for KEY_ZOOM_SUPPORTED or KEY_SMOOTH_ZOOM_SUPPORTED.
     static const char TRUE[];
     static const char FALSE[];
@@ -668,12 +664,6 @@
     // other modes.
     static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
 
-    // Values for light special effects
-    // Low-light enhancement mode
-    static const char LIGHTFX_LOWLIGHT[];
-    // High-dynamic range mode
-    static const char LIGHTFX_HDR[];
-
 private:
     DefaultKeyedVector<String8,String8>    mMap;
 };
diff --git a/include/media/nbaio/MonoPipe.h b/include/media/nbaio/MonoPipe.h
index c47bf6c..5fcfe9e 100644
--- a/include/media/nbaio/MonoPipe.h
+++ b/include/media/nbaio/MonoPipe.h
@@ -77,6 +77,17 @@
             void    setAvgFrames(size_t setpoint);
             size_t  maxFrames() const { return mMaxFrames; }
 
+            // Set the shutdown state for the write side of a pipe.
+            // This may be called by an unrelated thread.  When shutdown state is 'true',
+            // a write that would otherwise block instead returns a short transfer count.
+            // There is no guarantee how long it will take for the shutdown to be recognized,
+            // but it will not be an unbounded amount of time.
+            // The state can be restored to normal by calling shutdown(false).
+            void    shutdown(bool newState = true);
+
+            // Return true if the write side of a pipe is currently shutdown.
+            bool    isShutdown();
+
 private:
     // A pair of methods and a helper variable which allows the reader and the
     // writer to update and observe the values of mFront and mNextRdPTS in an
@@ -114,6 +125,8 @@
 
     int64_t offsetTimestampByAudioFrames(int64_t ts, size_t audFrames);
     LinearTransform mSamplesToLocalTime;
+
+            bool            mIsShutdown;    // true once shutdown(true) was called; no memory barriers needed
 };
 
 }   // namespace android
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index de7edf3..99f3c3b 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -53,12 +53,6 @@
     status_t dataCallback(const AudioRecord::Buffer& buffer);
     virtual void signalBufferReturned(MediaBuffer *buffer);
 
-    // If useLooperTime == true, buffers will carry absolute timestamps
-    // as returned by ALooper::GetNowUs(), otherwise systemTime() is used
-    // and buffers contain timestamps relative to start time.
-    // The default is to _not_ use looper time.
-    void setUseLooperTime(bool useLooperTime);
-
 protected:
     virtual ~AudioSource();
 
@@ -94,8 +88,6 @@
 
     List<MediaBuffer * > mBuffersReceived;
 
-    bool mUseLooperTime;
-
     void trackMaxAmplitude(int16_t *data, int nSamples);
 
     // This is used to raise the volume from mute to the
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index f60a535..e56527d 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -116,6 +116,9 @@
     // To be called before start()
     status_t setMaxAcquiredBufferCount(size_t count);
 
+    // To be called before start()
+    status_t setUseAbsoluteTimestamps();
+
 protected:
 
     // Implementation of the BufferQueue::ConsumerListener interface.  These
@@ -193,8 +196,8 @@
     // Set to a default of 30 fps if not specified by the client side
     int32_t mFrameRate;
 
-    // mStopped is a flag to check if the recording is going on
-    bool mStopped;
+    // mStarted indicates whether recording is currently in progress
+    bool mStarted;
 
     // mNumFramesReceived indicates the number of frames received from
     // the client side
@@ -212,6 +215,8 @@
 
     size_t mMaxAcquiredBufferCount;
 
+    bool mUseAbsoluteTimestamps;
+
     // mFrameAvailableCondition condition used to indicate whether there
     // is a frame available for dequeuing
     Condition mFrameAvailableCondition;
diff --git a/include/media/stagefright/TimeSource.h b/include/media/stagefright/TimeSource.h
index 443673d..8f11e14 100644
--- a/include/media/stagefright/TimeSource.h
+++ b/include/media/stagefright/TimeSource.h
@@ -41,8 +41,6 @@
     virtual int64_t getRealTimeUs();
 
 private:
-    static int64_t GetSystemTimeUs();
-
     int64_t mStartTimeUs;
 };
 
diff --git a/media/libnbaio/MonoPipe.cpp b/media/libnbaio/MonoPipe.cpp
index bbdc8c1..e8d3d9b 100644
--- a/media/libnbaio/MonoPipe.cpp
+++ b/media/libnbaio/MonoPipe.cpp
@@ -41,7 +41,8 @@
         mWriteTsValid(false),
         // mWriteTs
         mSetpoint((reqFrames * 11) / 16),
-        mWriteCanBlock(writeCanBlock)
+        mWriteCanBlock(writeCanBlock),
+        mIsShutdown(false)
 {
     CCHelper tmpHelper;
     status_t res;
@@ -121,7 +122,7 @@
             android_atomic_release_store(written + mRear, &mRear);
             totalFramesWritten += written;
         }
-        if (!mWriteCanBlock) {
+        if (!mWriteCanBlock || mIsShutdown) {
             break;
         }
         count -= written;
@@ -299,4 +300,14 @@
     return ts + frame_lt_duration;
 }
 
+void MonoPipe::shutdown(bool newState)
+{
+    mIsShutdown = newState;
+}
+
+bool MonoPipe::isShutdown()
+{
+    return mIsShutdown;
+}
+
 }   // namespace android
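
// Usage sketch (editorial, not part of the patch): how the new shutdown()
// switch is meant to be used.  An unrelated thread flips the flag while a
// writer is blocked inside MonoPipe::write(); the writer then returns with a
// short transfer count instead of blocking indefinitely.
#include <media/nbaio/MonoPipe.h>

static void releaseBlockedWriter(android::MonoPipe *pipe) {
    // Safe to call from any thread; recognition is bounded but not immediate.
    if (!pipe->isShutdown()) {
        pipe->shutdown();   // an in-progress write() soon returns a short count
    }
    // To restore normal operation later: pipe->shutdown(false);
}
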
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 3248dbc..861aebe 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -54,9 +54,7 @@
       mSampleRate(sampleRate),
       mPrevSampleTimeUs(0),
       mNumFramesReceived(0),
-      mNumClientOwnedBuffers(0),
-      mUseLooperTime(false) {
-
+      mNumClientOwnedBuffers(0) {
     ALOGV("sampleRate: %d, channelCount: %d", sampleRate, channelCount);
     CHECK(channelCount == 1 || channelCount == 2);
 
@@ -102,12 +100,6 @@
     return mInitCheck;
 }
 
-void AudioSource::setUseLooperTime(bool useLooperTime) {
-    CHECK(!mStarted);
-
-    mUseLooperTime = useLooperTime;
-}
-
 status_t AudioSource::start(MetaData *params) {
     Mutex::Autolock autoLock(mLock);
     if (mStarted) {
@@ -280,8 +272,7 @@
 }
 
 status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
-    int64_t timeUs =
-        mUseLooperTime ? ALooper::GetNowUs() : (systemTime() / 1000ll);
+    int64_t timeUs = systemTime() / 1000ll;
 
     ALOGV("dataCallbackTimestamp: %lld us", timeUs);
     Mutex::Autolock autoLock(mLock);
@@ -300,9 +291,7 @@
     if (mNumFramesReceived == 0 && mPrevSampleTimeUs == 0) {
         mInitialReadTimeUs = timeUs;
         // Initial delay
-        if (mUseLooperTime) {
-            mStartTimeUs = timeUs;
-        } else if (mStartTimeUs > 0) {
+        if (mStartTimeUs > 0) {
             mStartTimeUs = timeUs - mStartTimeUs;
         } else {
             // Assume latency is constant.
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 9d39d0e..3c002fc 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -42,12 +42,12 @@
     mNumPendingBuffers(0),
     mCurrentTimestamp(0),
     mFrameRate(30),
-    mStopped(false),
+    mStarted(false),
     mNumFramesReceived(0),
     mNumFramesEncoded(0),
     mFirstFrameTimestamp(0),
-    mMaxAcquiredBufferCount(4)  // XXX double-check the default
-{
+    mMaxAcquiredBufferCount(4),  // XXX double-check the default
+    mUseAbsoluteTimestamps(false) {
     ALOGV("SurfaceMediaSource");
 
     if (bufferWidth == 0 || bufferHeight == 0) {
@@ -80,7 +80,7 @@
 
 SurfaceMediaSource::~SurfaceMediaSource() {
     ALOGV("~SurfaceMediaSource");
-    CHECK(mStopped == true);
+    CHECK(!mStarted);
 }
 
 nsecs_t SurfaceMediaSource::getTimestamp() {
@@ -140,6 +140,8 @@
 
     Mutex::Autolock lock(mMutex);
 
+    CHECK(!mStarted);
+
     mStartTimeNs = 0;
     int64_t startTimeUs;
     int32_t bufferCount = 0;
@@ -171,6 +173,7 @@
     }
 
     mNumPendingBuffers = 0;
+    mStarted = true;
 
     return OK;
 }
@@ -185,13 +188,20 @@
     return OK;
 }
 
+status_t SurfaceMediaSource::setUseAbsoluteTimestamps() {
+    ALOGV("setUseAbsoluteTimestamps");
+    Mutex::Autolock lock(mMutex);
+    mUseAbsoluteTimestamps = true;
+
+    return OK;
+}
 
 status_t SurfaceMediaSource::stop()
 {
     ALOGV("stop");
     Mutex::Autolock lock(mMutex);
 
-    if (mStopped) {
+    if (!mStarted) {
         return OK;
     }
 
@@ -208,7 +218,7 @@
         mMediaBuffersAvailableCondition.wait(mMutex);
     }
 
-    mStopped = true;
+    mStarted = false;
     mFrameAvailableCondition.signal();
     mMediaBuffersAvailableCondition.signal();
 
@@ -270,7 +280,7 @@
 
     *buffer = NULL;
 
-    while (!mStopped && mNumPendingBuffers == mMaxAcquiredBufferCount) {
+    while (mStarted && mNumPendingBuffers == mMaxAcquiredBufferCount) {
         mMediaBuffersAvailableCondition.wait(mMutex);
     }
 
@@ -281,7 +291,7 @@
     BufferQueue::BufferItem item;
     // If the recording has started and the queue is empty, then just
     // wait here till the frames come in from the client side
-    while (!mStopped) {
+    while (mStarted) {
 
         status_t err = mBufferQueue->acquireBuffer(&item);
         if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
@@ -295,7 +305,7 @@
             }
 
             // check for the timing of this buffer
-            if (mNumFramesReceived == 0) {
+            if (mNumFramesReceived == 0 && !mUseAbsoluteTimestamps) {
                 mFirstFrameTimestamp = item.mTimestamp;
                 // Initial delay
                 if (mStartTimeNs > 0) {
@@ -322,7 +332,7 @@
 
     // If the loop was exited as a result of stopping the recording,
     // it is OK
-    if (mStopped) {
+    if (!mStarted) {
         ALOGV("Read: SurfaceMediaSource is stopped. Returning ERROR_END_OF_STREAM.");
         return ERROR_END_OF_STREAM;
     }
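
// Usage sketch (editorial, not part of the patch): intended call order for
// the new API, mirroring what the wifi-display PlaybackSession does below --
// request absolute timestamps before start(), and stop() before dropping the
// last reference, since the destructor now CHECKs !mStarted.
#include <media/stagefright/SurfaceMediaSource.h>

static android::sp<android::SurfaceMediaSource> makeWfdVideoSource(
        uint32_t w, uint32_t h) {           // w/h are caller-supplied assumptions
    android::sp<android::SurfaceMediaSource> source =
            new android::SurfaceMediaSource(w, h);
    source->setUseAbsoluteTimestamps();     // only legal before start()
    return source;                          // caller must start() and later stop()
}
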
diff --git a/media/libstagefright/ThrottledSource.cpp b/media/libstagefright/ThrottledSource.cpp
index b1fcafd..348a9d3 100644
--- a/media/libstagefright/ThrottledSource.cpp
+++ b/media/libstagefright/ThrottledSource.cpp
@@ -17,16 +17,10 @@
 #include "include/ThrottledSource.h"
 
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
 
 namespace android {
 
-static int64_t getNowUs() {
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-
-    return (int64_t)tv.tv_usec + tv.tv_sec * 1000000ll;
-}
-
 ThrottledSource::ThrottledSource(
         const sp<DataSource> &source,
         int32_t bandwidthLimitBytesPerSecond)
@@ -52,7 +46,7 @@
 
     mTotalTransferred += n;
 
-    int64_t nowUs = getNowUs();
+    int64_t nowUs = ALooper::GetNowUs();
 
     if (mStartTimeUs < 0) {
         mStartTimeUs = nowUs;
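
// Sketch (editorial): the throttling idea this class implements -- after
// transferring totalBytes, the earliest allowed completion time is
// start + totalBytes / limit; sleeping until then caps the observed
// bandwidth.  The helper below is illustrative only, not the class code.
#include <stdint.h>
#include <unistd.h>
#include <media/stagefright/foundation/ALooper.h>

static void throttleUntilAllowed(int64_t startTimeUs, int64_t totalBytes,
                                 int32_t limitBytesPerSecond) {
    if (limitBytesPerSecond <= 0) {
        return;                                    // throttling disabled
    }
    int64_t earliestUs =
            startTimeUs + (totalBytes * 1000000ll) / limitBytesPerSecond;
    int64_t nowUs = android::ALooper::GetNowUs();  // same clock as the patch
    if (earliestUs > nowUs) {
        usleep((useconds_t)(earliestUs - nowUs));
    }
}
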
diff --git a/media/libstagefright/TimeSource.cpp b/media/libstagefright/TimeSource.cpp
index d987fbf..041980f 100644
--- a/media/libstagefright/TimeSource.cpp
+++ b/media/libstagefright/TimeSource.cpp
@@ -17,24 +17,17 @@
 #include <stddef.h>
 #include <sys/time.h>
 
+#include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/TimeSource.h>
 
 namespace android {
 
 SystemTimeSource::SystemTimeSource()
-    : mStartTimeUs(GetSystemTimeUs()) {
+    : mStartTimeUs(ALooper::GetNowUs()) {
 }
 
 int64_t SystemTimeSource::getRealTimeUs() {
-    return GetSystemTimeUs() - mStartTimeUs;
-}
-
-// static
-int64_t SystemTimeSource::GetSystemTimeUs() {
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-
-    return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
+    return ALooper::GetNowUs() - mStartTimeUs;
 }
 
 }  // namespace android
diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp
index 9df15eb..7e9c4bf 100644
--- a/media/libstagefright/TimedEventQueue.cpp
+++ b/media/libstagefright/TimedEventQueue.cpp
@@ -30,6 +30,7 @@
 #include <sys/time.h>
 
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
 
 namespace android {
 
@@ -94,7 +95,7 @@
 TimedEventQueue::event_id TimedEventQueue::postEventWithDelay(
         const sp<Event> &event, int64_t delay_us) {
     CHECK(delay_us >= 0);
-    return postTimedEvent(event, getRealTimeUs() + delay_us);
+    return postTimedEvent(event, ALooper::GetNowUs() + delay_us);
 }
 
 TimedEventQueue::event_id TimedEventQueue::postTimedEvent(
@@ -179,14 +180,6 @@
 }
 
 // static
-int64_t TimedEventQueue::getRealTimeUs() {
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-
-    return (int64_t)tv.tv_sec * 1000000ll + tv.tv_usec;
-}
-
-// static
 void *TimedEventQueue::ThreadWrapper(void *me) {
 
     androidSetThreadPriority(0, ANDROID_PRIORITY_FOREGROUND);
@@ -225,7 +218,7 @@
                 List<QueueItem>::iterator it = mQueue.begin();
                 eventID = (*it).event->eventID();
 
-                now_us = getRealTimeUs();
+                now_us = ALooper::GetNowUs();
                 int64_t when_us = (*it).realtime_us;
 
                 int64_t delay_us;
@@ -258,7 +251,7 @@
                 if (!timeoutCapped && err == -ETIMEDOUT) {
                     // We finally hit the time this event is supposed to
                     // trigger.
-                    now_us = getRealTimeUs();
+                    now_us = ALooper::GetNowUs();
                     break;
                 }
             }
diff --git a/media/libstagefright/foundation/ALooper.cpp b/media/libstagefright/foundation/ALooper.cpp
index a5b316d..22777a2 100644
--- a/media/libstagefright/foundation/ALooper.cpp
+++ b/media/libstagefright/foundation/ALooper.cpp
@@ -63,10 +63,7 @@
 
 // static
 int64_t ALooper::GetNowUs() {
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-
-    return (int64_t)tv.tv_sec * 1000000ll + tv.tv_usec;
+    return systemTime(SYSTEM_TIME_MONOTONIC) / 1000ll;
 }
 
 ALooper::ALooper()
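
// Note (editorial): with this change GetNowUs() is derived from the monotonic
// clock, so intervals measured anywhere in the stack are immune to wall-clock
// adjustments.  Minimal illustration:
#include <media/stagefright/foundation/ALooper.h>

static int64_t elapsedSinceUs(int64_t thenUs) {
    // Never goes backwards across a system time change, unlike gettimeofday().
    return android::ALooper::GetNowUs() - thenUs;
}
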
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 4b369ed..6cca8da 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -27,6 +27,7 @@
 #include <binder/MemoryDealer.h>
 #include <media/IMediaPlayerService.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaDefs.h>
@@ -40,13 +41,6 @@
 
 namespace android {
 
-static int64_t getNowUs() {
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-
-    return (int64_t)tv.tv_usec + tv.tv_sec * 1000000;
-}
-
 Harness::Harness()
     : mInitCheck(NO_INIT) {
     mInitCheck = initOMX();
@@ -126,7 +120,7 @@
         Vector<Buffer> *inputBuffers,
         Vector<Buffer> *outputBuffers,
         omx_message *msg, int64_t timeoutUs) {
-    int64_t finishBy = getNowUs() + timeoutUs;
+    int64_t finishBy = ALooper::GetNowUs() + timeoutUs;
 
     for (;;) {
         Mutex::Autolock autoLock(mLock);
@@ -150,7 +144,7 @@
         status_t err = (timeoutUs < 0)
             ? mMessageAddedCondition.wait(mLock)
             : mMessageAddedCondition.waitRelative(
-                    mLock, (finishBy - getNowUs()) * 1000);
+                    mLock, (finishBy - ALooper::GetNowUs()) * 1000);
 
         if (err == TIMED_OUT) {
             return err;
diff --git a/media/libstagefright/rtsp/ARTPAssembler.cpp b/media/libstagefright/rtsp/ARTPAssembler.cpp
index 1844bc5..c7a65c2 100644
--- a/media/libstagefright/rtsp/ARTPAssembler.cpp
+++ b/media/libstagefright/rtsp/ARTPAssembler.cpp
@@ -18,19 +18,13 @@
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/AMessage.h>
 
 #include <stdint.h>
 
 namespace android {
 
-static int64_t getNowUs() {
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-
-    return (int64_t)tv.tv_usec + tv.tv_sec * 1000000ll;
-}
-
 ARTPAssembler::ARTPAssembler()
     : mFirstFailureTimeUs(-1) {
 }
@@ -42,7 +36,7 @@
 
         if (status == WRONG_SEQUENCE_NUMBER) {
             if (mFirstFailureTimeUs >= 0) {
-                if (getNowUs() - mFirstFailureTimeUs > 10000ll) {
+                if (ALooper::GetNowUs() - mFirstFailureTimeUs > 10000ll) {
                     mFirstFailureTimeUs = -1;
 
                     // LOG(VERBOSE) << "waited too long for packet.";
@@ -50,7 +44,7 @@
                     continue;
                 }
             } else {
-                mFirstFailureTimeUs = getNowUs();
+                mFirstFailureTimeUs = ALooper::GetNowUs();
             }
             break;
         } else {
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 968a805..60cca69 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -132,7 +132,7 @@
         mOutputFormat->setInt32("bitrate", audioBitrate);
     } else {
         mOutputFormat->setInt32("bitrate", videoBitrate);
-        mOutputFormat->setInt32("frame-rate", 24);
+        mOutputFormat->setInt32("frame-rate", 60);
         mOutputFormat->setInt32("i-frame-interval", 1);  // Iframes every 1 secs
         // mOutputFormat->setInt32("prepend-sps-pps-to-idr-frames", 1);
     }
@@ -301,8 +301,6 @@
         if (buffer != NULL) {
             CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
 
-            ALOGV("in: %s timeUs = %lld us", mIsVideo ? "video" : "audio", timeUs);
-
             memcpy(mEncoderInputBuffers.itemAt(bufferIndex)->data(),
                    buffer->data(),
                    buffer->size());
@@ -353,8 +351,6 @@
             notify->setInt32("what", kWhatEOS);
             notify->post();
         } else {
-            ALOGV("out: %s timeUs = %lld us", mIsVideo ? "video" : "audio", timeUs);
-
             sp<ABuffer> buffer = new ABuffer(size);
             buffer->meta()->setInt64("timeUs", timeUs);
 
diff --git a/media/libstagefright/wifi-display/source/MediaPuller.cpp b/media/libstagefright/wifi-display/source/MediaPuller.cpp
index 82ae001..ab69c4a 100644
--- a/media/libstagefright/wifi-display/source/MediaPuller.cpp
+++ b/media/libstagefright/wifi-display/source/MediaPuller.cpp
@@ -75,7 +75,16 @@
     switch (msg->what()) {
         case kWhatStart:
         {
-            status_t err = mSource->start();
+            status_t err;
+            if (mIsAudio) {
+                // This atrocity causes AudioSource to deliver absolute
+                // systemTime() based timestamps (off by 1 us).
+                sp<MetaData> params = new MetaData;
+                params->setInt64(kKeyTime, 1ll);
+                err = mSource->start(params.get());
+            } else {
+                err = mSource->start();
+            }
 
             if (err == OK) {
                 schedulePull();
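
// Sketch (editorial): what the kKeyTime trick above amounts to.  With the
// looper-time path removed from AudioSource, passing a tiny non-zero start
// time makes the source anchor its buffer timestamps to systemTime()/1000,
// so they come out effectively absolute (off by the 1 us passed in).
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>

static android::status_t startWithAbsoluteTimestamps(
        const android::sp<android::MediaSource> &source) {
    android::sp<android::MetaData> params = new android::MetaData;
    params->setInt64(android::kKeyTime, 1ll);   // 1 us, as MediaPuller does
    return source->start(params.get());
}
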
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 775f23b..7de607c 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -75,8 +75,6 @@
     status_t start();
     void stopAsync();
 
-    bool isStopped() const { return !mStarted; }
-
     void queueAccessUnit(const sp<ABuffer> &accessUnit);
     sp<ABuffer> dequeueAccessUnit();
 
@@ -175,13 +173,11 @@
 void WifiDisplaySource::PlaybackSession::Track::stopAsync() {
     ALOGV("Track::stopAsync isAudio=%d", mIsAudio);
 
-    CHECK(mStarted);
-
     mConverter->shutdownAsync();
 
     sp<AMessage> msg = new AMessage(kWhatMediaPullerStopped, id());
 
-    if (mMediaPuller != NULL) {
+    if (mStarted && mMediaPuller != NULL) {
         mMediaPuller->stopAsync(msg);
     } else {
         msg->post();
@@ -200,6 +196,8 @@
             sp<AMessage> notify = mNotify->dup();
             notify->setInt32("what", kWhatStopped);
             notify->post();
+
+            ALOGI("kWhatStopped %s posted", mIsAudio ? "audio" : "video");
             break;
         }
 
@@ -267,9 +265,11 @@
       mNumRTPOctetsSent(0),
       mNumSRsSent(0),
       mSendSRPending(false),
-      mFirstPacketTimeUs(-1ll),
-      mHistoryLength(0),
-      mTotalBytesSent(0ll)
+      mHistoryLength(0)
+#if TRACK_BANDWIDTH
+      ,mFirstPacketTimeUs(-1ll)
+      ,mTotalBytesSent(0ll)
+#endif
 #if LOG_TRANSPORT_STREAM
       ,mLogFile(NULL)
 #endif
@@ -524,10 +524,6 @@
 
 void WifiDisplaySource::PlaybackSession::onMessageReceived(
         const sp<AMessage> &msg) {
-    if (mWeAreDead) {
-        return;
-    }
-
     switch (msg->what()) {
         case kWhatRTPNotify:
         case kWhatRTCPNotify:
@@ -659,6 +655,13 @@
 
         case kWhatConverterNotify:
         {
+            if (mWeAreDead) {
+                ALOGV("dropping msg '%s' because we're dead",
+                      msg->debugString().c_str());
+
+                break;
+            }
+
             int32_t what;
             CHECK(msg->findInt32("what", &what));
 
@@ -747,21 +750,18 @@
             CHECK(msg->findSize("trackIndex", &trackIndex));
 
             if (what == Track::kWhatStopped) {
-                bool allTracksAreStopped = true;
-                for (size_t i = 0; i < mTracks.size(); ++i) {
-                    const sp<Track> &track = mTracks.valueAt(i);
-                    if (!track->isStopped()) {
-                        allTracksAreStopped = false;
-                        break;
-                    }
-                }
+                ALOGI("Track %d stopped", trackIndex);
 
-                if (!allTracksAreStopped) {
+                sp<Track> track = mTracks.valueFor(trackIndex);
+                looper()->unregisterHandler(track->id());
+                mTracks.removeItem(trackIndex);
+                track.clear();
+
+                if (!mTracks.isEmpty()) {
+                    ALOGI("not all tracks are stopped yet");
                     break;
                 }
 
-                mTracks.clear();
-
                 mPacketizer.clear();
 
 #if ENABLE_RETRANSMISSION
@@ -883,11 +883,10 @@
 status_t WifiDisplaySource::PlaybackSession::addVideoSource() {
     sp<SurfaceMediaSource> source = new SurfaceMediaSource(width(), height());
 
-    sp<MediaSource> videoSource =
-            new RepeaterSource(source, 24.0 /* rateHz */);
+    source->setUseAbsoluteTimestamps();
 
     size_t numInputBuffers;
-    status_t err = addSource(true /* isVideo */, videoSource, &numInputBuffers);
+    status_t err = addSource(true /* isVideo */, source, &numInputBuffers);
 
     if (err != OK) {
         return err;
@@ -908,8 +907,6 @@
             2 /* channelCount */);
 
     if (audioSource->initCheck() == OK) {
-        audioSource->setUseLooperTime(true);
-
         return addSource(
                 false /* isVideo */, audioSource, NULL /* numInputBuffers */);
     }
@@ -1074,12 +1071,15 @@
         // flush
 
         int64_t nowUs = ALooper::GetNowUs();
+
+#if TRACK_BANDWIDTH
         if (mFirstPacketTimeUs < 0ll) {
             mFirstPacketTimeUs = nowUs;
         }
+#endif
 
         // 90kHz time scale
-        uint32_t rtpTime = ((nowUs - mFirstPacketTimeUs) * 9ll) / 100ll;
+        uint32_t rtpTime = (nowUs * 9ll) / 100ll;
 
         uint8_t *rtp = mTSQueue->data();
         rtp[0] = 0x80;
@@ -1115,13 +1115,15 @@
         } else {
             sendPacket(mRTPSessionID, rtp, mTSQueue->size());
 
+#if TRACK_BANDWIDTH
             mTotalBytesSent += mTSQueue->size();
             int64_t delayUs = ALooper::GetNowUs() - mFirstPacketTimeUs;
 
             if (delayUs > 0ll) {
-                ALOGV("approx. net bandwidth used: %.2f Mbit/sec",
+                ALOGI("approx. net bandwidth used: %.2f Mbit/sec",
                         mTotalBytesSent * 8.0 / delayUs);
             }
+#endif
         }
 
         mTSQueue->setInt32Data(mRTPSeqNo - 1);
@@ -1318,10 +1320,6 @@
     return true;
 }
 
-static inline size_t MIN(size_t a, size_t b) {
-    return (a < b) ? a : b;
-}
-
 status_t WifiDisplaySource::PlaybackSession::packetizeAccessUnit(
         size_t trackIndex, sp<ABuffer> accessUnit) {
     const sp<Track> &track = mTracks.valueFor(trackIndex);
@@ -1334,11 +1332,6 @@
     if (mHDCP != NULL && !track->isAudio()) {
         isHDCPEncrypted = true;
 
-#if 0
-        ALOGI("in:");
-        hexdump(accessUnit->data(), MIN(64, accessUnit->size()));
-#endif
-
         if (IsIDR(accessUnit)) {
             // XXX remove this once the encoder takes care of this.
             accessUnit = mPacketizer->prependCSD(
@@ -1356,13 +1349,6 @@
                   err);
 
             return err;
-        } else {
-#if 0
-            ALOGI("out:");
-            hexdump(accessUnit->data(), MIN(64, accessUnit->size()));
-            ALOGI("inputCTR: 0x%016llx", inputCTR);
-            ALOGI("streamCTR: 0x%08x", trackIndex);
-#endif
         }
 
         HDCP_private_data[0] = 0x00;
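
// Worked note (editorial): the RTP timestamp conversion above uses a 90 kHz
// media clock -- 90 ticks per millisecond, i.e. 9 ticks per 100 microseconds,
// hence rtpTime = nowUs * 9 / 100.  For example, nowUs = 1,000,000 us (1 s)
// maps to 90,000 ticks.
#include <stdint.h>

static uint32_t toRtp90kHz(int64_t nowUs) {
    return (uint32_t)((nowUs * 9ll) / 100ll);
}
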
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h
index 9237a72..8d88648 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.h
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.h
@@ -32,6 +32,7 @@
 
 #define LOG_TRANSPORT_STREAM            0
 #define ENABLE_RETRANSMISSION           0
+#define TRACK_BANDWIDTH                 0
 
 // Encapsulates the state of an RTP/RTCP session in the context of wifi
 // display.
@@ -160,12 +161,13 @@
 
     bool mSendSRPending;
 
-    int64_t mFirstPacketTimeUs;
-
     List<sp<ABuffer> > mHistory;
     size_t mHistoryLength;
 
+#if TRACK_BANDWIDTH
+    int64_t mFirstPacketTimeUs;
     uint64_t mTotalBytesSent;
+#endif
 
 #if LOG_TRANSPORT_STREAM
     FILE *mLogFile;
diff --git a/media/libstagefright/wifi-display/source/RepeaterSource.cpp b/media/libstagefright/wifi-display/source/RepeaterSource.cpp
index 483d29c..dc216e8 100644
--- a/media/libstagefright/wifi-display/source/RepeaterSource.cpp
+++ b/media/libstagefright/wifi-display/source/RepeaterSource.cpp
@@ -13,7 +13,8 @@
 namespace android {
 
 RepeaterSource::RepeaterSource(const sp<MediaSource> &source, double rateHz)
-    : mSource(source),
+    : mStarted(false),
+      mSource(source),
       mRateHz(rateHz),
       mBuffer(NULL),
       mResult(OK),
@@ -22,10 +23,12 @@
 }
 
 RepeaterSource::~RepeaterSource() {
-    stop();
+    CHECK(!mStarted);
 }
 
 status_t RepeaterSource::start(MetaData *params) {
+    CHECK(!mStarted);
+
     status_t err = mSource->start(params);
 
     if (err != OK) {
@@ -46,10 +49,14 @@
 
     postRead();
 
+    mStarted = true;
+
     return OK;
 }
 
 status_t RepeaterSource::stop() {
+    CHECK(mStarted);
+
     ALOGV("stopping");
 
     if (mLooper != NULL) {
@@ -69,6 +76,8 @@
 
     ALOGV("stopped");
 
+    mStarted = false;
+
     return err;
 }
 
diff --git a/media/libstagefright/wifi-display/source/RepeaterSource.h b/media/libstagefright/wifi-display/source/RepeaterSource.h
index 31eb5cd..3049362 100644
--- a/media/libstagefright/wifi-display/source/RepeaterSource.h
+++ b/media/libstagefright/wifi-display/source/RepeaterSource.h
@@ -33,6 +33,8 @@
     Mutex mLock;
     Condition mCondition;
 
+    bool mStarted;
+
     sp<MediaSource> mSource;
     double mRateHz;
 
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index 8091cc4..1083a80 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -41,7 +41,8 @@
 WifiDisplaySource::WifiDisplaySource(
         const sp<ANetworkSession> &netSession,
         const sp<IRemoteDisplayClient> &client)
-    : mNetSession(netSession),
+    : mState(INITIALIZED),
+      mNetSession(netSession),
       mClient(client),
       mSessionID(0),
       mStopReplyID(0),
@@ -61,6 +62,8 @@
 }
 
 status_t WifiDisplaySource::start(const char *iface) {
+    CHECK_EQ(mState, INITIALIZED);
+
     sp<AMessage> msg = new AMessage(kWhatStart, id());
     msg->setString("iface", iface);
 
@@ -137,6 +140,10 @@
                 }
             }
 
+            if (err == OK) {
+                mState = AWAITING_CLIENT_CONNECTION;
+            }
+
             sp<AMessage> response = new AMessage;
             response->setInt32("err", err);
             response->postReply(replyID);
@@ -190,6 +197,8 @@
                         break;
                     }
 
+                    CHECK_EQ(mState, AWAITING_CLIENT_CONNECTION);
+
                     CHECK(msg->findString("client-ip", &mClientInfo.mRemoteIP));
                     CHECK(msg->findString("server-ip", &mClientInfo.mLocalIP));
 
@@ -208,6 +217,8 @@
 
                     ALOGI("We now have a client (%d) connected.", sessionID);
 
+                    mState = AWAITING_CLIENT_SETUP;
+
                     status_t err = sendM1(sessionID);
                     CHECK_EQ(err, (status_t)OK);
                     break;
@@ -234,14 +245,24 @@
         {
             CHECK(msg->senderAwaitsResponse(&mStopReplyID));
 
-            if (mClientSessionID != 0
-                    && mClientInfo.mPlaybackSessionID != -1) {
+            CHECK_LT(mState, AWAITING_CLIENT_TEARDOWN);
+
+            if (mState >= AWAITING_CLIENT_PLAY) {
+                // We have a session, i.e. a previous SETUP succeeded.
+
                 status_t err = sendM5(
                         mClientSessionID, true /* requestShutdown */);
 
                 if (err == OK) {
+                    mState = AWAITING_CLIENT_TEARDOWN;
+
+                    (new AMessage(kWhatTeardownTriggerTimedOut, id()))->post(
+                            kTeardownTriggerTimeouSecs * 1000000ll);
+
                     break;
                 }
+
+                // fall through.
             }
 
             finishStop();
@@ -291,7 +312,16 @@
                             mClientInfo.mPlaybackSession->getSurfaceTexture(),
                             mClientInfo.mPlaybackSession->width(),
                             mClientInfo.mPlaybackSession->height(),
-                            0 /* flags */);
+#if REQUIRE_HDCP
+                            IRemoteDisplayClient::kDisplayFlagSecure
+#else
+                            0 /* flags */
+#endif
+                            );
+                }
+
+                if (mState == ABOUT_TO_PLAY) {
+                    mState = PLAYING;
                 }
             } else if (what == PlaybackSession::kWhatSessionDestroyed) {
                 disconnectClient2();
@@ -339,6 +369,18 @@
             break;
         }
 
+        case kWhatTeardownTriggerTimedOut:
+        {
+            if (mState == AWAITING_CLIENT_TEARDOWN) {
+                ALOGI("TEARDOWN trigger timed out, forcing disconnection.");
+
+                CHECK_NE(mStopReplyID, 0);
+                finishStop();
+                break;
+            }
+            break;
+        }
+
 #if REQUIRE_HDCP
         case kWhatHDCPNotify:
         {
@@ -497,7 +539,7 @@
     //   use "78 00 02 02 00008000 00000000 00000000 00 0000 0000 00 none none\r\n"
     AString body = StringPrintf(
         "wfd_video_formats: "
-        "78 00 02 02 00008000 00000000 00000000 00 0000 0000 00 none none\r\n"
+        "30 00 02 02 00000040 00000000 00000000 00 0000 0000 00 none none\r\n"
         "wfd_audio_codecs: AAC 00000001 00\r\n"  // 2 ch AAC 48kHz
         "wfd_presentation_URL: rtsp://%s/wfd1.0/streamid=0 none\r\n"
         "wfd_client_rtp_ports: RTP/AVP/%s;unicast 19000 0 mode=play\r\n",
@@ -1020,6 +1062,8 @@
         return err;
     }
 
+    mState = AWAITING_CLIENT_PLAY;
+
     scheduleReaper();
     scheduleKeepAlive(sessionID);
 
@@ -1057,6 +1101,9 @@
 
     playbackSession->finishPlay();
 
+    CHECK_EQ(mState, AWAITING_CLIENT_PLAY);
+    mState = ABOUT_TO_PLAY;
+
     return OK;
 }
 
@@ -1107,7 +1154,8 @@
 
     mNetSession->sendRequest(sessionID, response.c_str());
 
-    if (mStopReplyID != 0) {
+    if (mState == AWAITING_CLIENT_TEARDOWN) {
+        CHECK_NE(mStopReplyID, 0);
         finishStop();
     } else {
         mClient->onDisplayError(IRemoteDisplayClient::kDisplayErrorUnknown);
@@ -1119,6 +1167,8 @@
 void WifiDisplaySource::finishStop() {
     ALOGV("finishStop");
 
+    mState = STOPPING;
+
     disconnectClientAsync();
 }
 
@@ -1140,9 +1190,11 @@
     ALOGV("finishStop2");
 
 #if REQUIRE_HDCP
-    mHDCP->setObserver(NULL);
-    mHDCPObserver.clear();
-    mHDCP.clear();
+    if (mHDCP != NULL) {
+        mHDCP->setObserver(NULL);
+        mHDCPObserver.clear();
+        mHDCP.clear();
+    }
 #endif
 
     if (mSessionID != 0) {
@@ -1151,6 +1203,7 @@
     }
 
     ALOGI("We're stopped.");
+    mState = STOPPED;
 
     status_t err = OK;
 
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.h b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
index ade623a..8c043cd 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.h
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
@@ -55,6 +55,18 @@
     struct HDCPObserver;
 #endif
 
+    enum State {
+        INITIALIZED,
+        AWAITING_CLIENT_CONNECTION,
+        AWAITING_CLIENT_SETUP,
+        AWAITING_CLIENT_PLAY,
+        ABOUT_TO_PLAY,
+        PLAYING,
+        AWAITING_CLIENT_TEARDOWN,
+        STOPPING,
+        STOPPED,
+    };
+
     enum {
         kWhatStart,
         kWhatRTSPNotify,
@@ -64,6 +76,7 @@
         kWhatKeepAlive,
         kWhatHDCPNotify,
         kWhatFinishStop2,
+        kWhatTeardownTriggerTimedOut,
     };
 
     struct ResponseID {
@@ -82,11 +95,18 @@
 
     static const int64_t kReaperIntervalUs = 1000000ll;
 
+    // We request that the dongle send us a "TEARDOWN" so that we can
+    // perform an orderly shutdown. We're willing to wait up to 2 secs
+    // for that message to arrive; after that we force a disconnect
+    // instead.
+    static const int64_t kTeardownTriggerTimeouSecs = 2;
+
     static const int64_t kPlaybackSessionTimeoutSecs = 30;
 
     static const int64_t kPlaybackSessionTimeoutUs =
         kPlaybackSessionTimeoutSecs * 1000000ll;
 
+    State mState;
     sp<ANetworkSession> mNetSession;
     sp<IRemoteDisplayClient> mClient;
     struct in_addr mInterfaceAddr;
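
// State-machine summary (editorial, derived from the WifiDisplaySource.cpp
// changes in this patch):
//
//   INITIALIZED                -> AWAITING_CLIENT_CONNECTION  (start() ok)
//   AWAITING_CLIENT_CONNECTION -> AWAITING_CLIENT_SETUP       (client connects)
//   AWAITING_CLIENT_SETUP      -> AWAITING_CLIENT_PLAY        (SETUP handled)
//   AWAITING_CLIENT_PLAY       -> ABOUT_TO_PLAY               (PLAY handled)
//   ABOUT_TO_PLAY              -> PLAYING                     (session established)
//   >= AWAITING_CLIENT_PLAY    -> AWAITING_CLIENT_TEARDOWN    (stop requested,
//                                 M5 triggers TEARDOWN; forced after 2 s)
//   any of the above           -> STOPPING (finishStop())
//   STOPPING                   -> STOPPED  (finishStop2())
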
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index b9e32381..7a1c020 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -73,7 +73,7 @@
 # LOCAL_CFLAGS += -DTEE_SINK_FRAMES=0x200000
 
 # uncomment to enable the audio watchdog
-LOCAL_SRC_FILES += AudioWatchdog.cpp
-LOCAL_CFLAGS += -DAUDIO_WATCHDOG
+# LOCAL_SRC_FILES += AudioWatchdog.cpp
+# LOCAL_CFLAGS += -DAUDIO_WATCHDOG
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 9689654..aa30864 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1163,6 +1163,8 @@
 void AudioFlinger::ThreadBase::exit()
 {
     ALOGV("ThreadBase::exit");
+    // do any cleanup required for exit to succeed
+    preExit();
     {
         // This lock prevents the following race in thread (uniprocessor for illustration):
         //  if (!exitPending()) {
@@ -1694,6 +1696,15 @@
     run(mName, ANDROID_PRIORITY_URGENT_AUDIO);
 }
 
+// ThreadBase virtuals
+void AudioFlinger::PlaybackThread::preExit()
+{
+    ALOGV("  preExit()");
+    // FIXME: this uses a hard-coded string for now; in the future this functionality
+    //        will be converted to use audio HAL extensions required to support tunneling
+    mOutput->stream->common.set_parameters(&mOutput->stream->common, "exiting=1");
+}
+
 // PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
 sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
         const sp<AudioFlinger::Client>& client,
@@ -2408,11 +2419,13 @@
         delete fastTrack->mBufferProvider;
         sq->end(false /*didModify*/);
         delete mFastMixer;
+#ifdef AUDIO_WATCHDOG
         if (mAudioWatchdog != 0) {
             mAudioWatchdog->requestExit();
             mAudioWatchdog->requestExitAndWait();
             mAudioWatchdog.clear();
         }
+#endif
     }
     delete mAudioMixer;
 }
@@ -2725,9 +2738,11 @@
                 if (old == -1) {
                     __futex_syscall3(&mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
                 }
+#ifdef AUDIO_WATCHDOG
                 if (mAudioWatchdog != 0) {
                     mAudioWatchdog->resume();
                 }
+#endif
             }
             state->mCommand = FastMixerState::MIX_WRITE;
             sq->end();
@@ -2804,9 +2819,11 @@
             if (kUseFastMixer == FastMixer_Dynamic) {
                 mNormalSink = mOutputSink;
             }
+#ifdef AUDIO_WATCHDOG
             if (mAudioWatchdog != 0) {
                 mAudioWatchdog->pause();
             }
+#endif
         } else {
             sq->end(false /*didModify*/);
         }
@@ -3319,9 +3336,11 @@
         sq->end(didModify);
         sq->push(block);
     }
+#ifdef AUDIO_WATCHDOG
     if (pauseAudioWatchdog && mAudioWatchdog != 0) {
         mAudioWatchdog->pause();
     }
+#endif
 
     // Now perform the deferred reset on fast tracks that have stopped
     while (resetMask != 0) {
@@ -3643,11 +3662,13 @@
         }
     }
 
+#ifdef AUDIO_WATCHDOG
     if (mAudioWatchdog != 0) {
         // Make a non-atomic copy of audio watchdog dump so it won't change underneath us
         AudioWatchdogDump wdCopy = mAudioWatchdogDump;
         wdCopy.dump(fd);
     }
+#endif
 }
 
 uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 45cee0b..49e2b2c 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -682,6 +682,8 @@
                     // check if some effects must be suspended when an effect chain is added
                     void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain);
 
+        virtual     void        preExit() { }
+
         friend class AudioFlinger;      // for mEffectChains
 
                     const type_t            mType;
@@ -1050,6 +1052,9 @@
                     // is safe to do so. That will drop the final ref count and destroy the tracks.
         virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
 
+        // ThreadBase virtuals
+        virtual     void        preExit();
+
 public:
 
         virtual     status_t    initCheck() const { return (mOutput == NULL) ? NO_INIT : NO_ERROR; }
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index c7927fe..eff47c8 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -14,6 +14,7 @@
     camera2/CameraMetadata.cpp \
     camera2/Parameters.cpp \
     camera2/FrameProcessor.cpp \
+    camera2/StreamingProcessor.cpp \
     camera2/JpegProcessor.cpp \
     camera2/CallbackProcessor.cpp \
     camera2/ZslProcessor.cpp \
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index d315abb..33e0b56 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -24,7 +24,6 @@
 #include <cutils/properties.h>
 #include <gui/SurfaceTextureClient.h>
 #include <gui/Surface.h>
-#include <media/hardware/MetadataBufferType.h>
 #include "camera2/Parameters.h"
 #include "Camera2Client.h"
 
@@ -52,10 +51,7 @@
         Client(cameraService, cameraClient,
                 cameraId, cameraFacing, clientPid),
         mSharedCameraClient(cameraClient),
-        mParameters(cameraId, cameraFacing),
-        mPreviewStreamId(NO_STREAM),
-        mRecordingStreamId(NO_STREAM),
-        mRecordingHeapCount(kDefaultRecordingHeapCount)
+        mParameters(cameraId, cameraFacing)
 {
     ATRACE_CALL();
     ALOGI("Camera %d: Opened", cameraId);
@@ -101,6 +97,8 @@
 
     String8 threadName;
 
+    mStreamingProcessor = new StreamingProcessor(this);
+
     mFrameProcessor = new FrameProcessor(this);
     threadName = String8::format("C2-%d-FrameProc",
             mCameraId);
@@ -140,9 +138,12 @@
 
     mDestructionStarted = true;
 
-    // Rewrite mClientPid to allow shutdown by CameraService
-    mClientPid = getCallingPid();
-    disconnect();
+    SharedParameters::Lock l(mParameters);
+    if (l.mParameters.state != Parameters::DISCONNECTED) {
+        // Rewrite mClientPid to allow shutdown by CameraService
+        mClientPid = getCallingPid();
+        disconnect();
+    }
 
     ALOGI("Camera %d: Closed", mCameraId);
 }
@@ -314,25 +315,9 @@
             getCaptureStreamId());
     result.appendFormat("    Recording stream ID: %d\n",
             getRecordingStreamId());
+    write(fd, result.string(), result.size());
 
-    result.append("  Current requests:\n");
-    if (mPreviewRequest.entryCount() != 0) {
-        result.append("    Preview request:\n");
-        write(fd, result.string(), result.size());
-        mPreviewRequest.dump(fd, 2, 6);
-    } else {
-        result.append("    Preview request: undefined\n");
-        write(fd, result.string(), result.size());
-    }
-
-    if (mRecordingRequest.entryCount() != 0) {
-        result = "    Recording request:\n";
-        write(fd, result.string(), result.size());
-        mRecordingRequest.dump(fd, 2, 6);
-    } else {
-        result = "    Recording request: undefined\n";
-        write(fd, result.string(), result.size());
-    }
+    mStreamingProcessor->dump(fd, args);
 
     mCaptureSequencer->dump(fd, args);
 
@@ -373,20 +358,10 @@
         l.mParameters.state = Parameters::DISCONNECTED;
     }
 
-    if (mPreviewStreamId != NO_STREAM) {
-        mDevice->deleteStream(mPreviewStreamId);
-        mPreviewStreamId = NO_STREAM;
-    }
-
+    mStreamingProcessor->deletePreviewStream();
+    mStreamingProcessor->deleteRecordingStream();
     mJpegProcessor->deleteStream();
-
-    if (mRecordingStreamId != NO_STREAM) {
-        mDevice->deleteStream(mRecordingStreamId);
-        mRecordingStreamId = NO_STREAM;
-    }
-
     mCallbackProcessor->deleteStream();
-
     mZslProcessor->deleteStream();
 
     mFrameProcessor->requestExit();
@@ -546,24 +521,13 @@
             break;
     }
 
-    if (mPreviewStreamId != NO_STREAM) {
-        res = mDevice->waitUntilDrained();
-        if (res != OK) {
-            ALOGE("%s: Error waiting for preview to drain: %s (%d)",
-                    __FUNCTION__, strerror(-res), res);
-            return res;
-        }
-        res = mDevice->deleteStream(mPreviewStreamId);
-        if (res != OK) {
-            ALOGE("%s: Unable to delete old preview stream: %s (%d)",
-                    __FUNCTION__, strerror(-res), res);
-            return res;
-        }
-        mPreviewStreamId = NO_STREAM;
-    }
-
     mPreviewSurface = binder;
-    mPreviewWindow = window;
+    res = mStreamingProcessor->setPreviewWindow(window);
+    if (res != OK) {
+        ALOGE("%s: Unable to set new preview window: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
 
     if (l.mParameters.state == Parameters::WAITING_FOR_PREVIEW_WINDOW) {
         return startPreviewL(l.mParameters, false);
@@ -637,18 +601,20 @@
         return INVALID_OPERATION;
     }
 
-    if (mPreviewWindow == 0) {
+    if (!mStreamingProcessor->haveValidPreviewWindow()) {
         params.state = Parameters::WAITING_FOR_PREVIEW_WINDOW;
         return OK;
     }
     params.state = Parameters::STOPPED;
 
-    res = updatePreviewStream(params);
+    res = mStreamingProcessor->updatePreviewStream(params);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to update preview stream: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
+
+    Vector<uint8_t> outputStreams;
     bool callbacksEnabled = params.previewCallbackFlags &
         CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
     if (callbacksEnabled) {
@@ -658,6 +624,7 @@
                     __FUNCTION__, mCameraId, strerror(-res), res);
             return res;
         }
+        outputStreams.push(getCallbackStreamId());
     }
     if (params.zslMode && !params.recordingHint) {
         res = mZslProcessor->updateStream(params);
@@ -666,38 +633,28 @@
                     __FUNCTION__, mCameraId, strerror(-res), res);
             return res;
         }
+        outputStreams.push(getZslStreamId());
     }
 
-    CameraMetadata *request;
+    outputStreams.push(getPreviewStreamId());
+
     if (!params.recordingHint) {
-        if (mPreviewRequest.entryCount() == 0) {
-            res = updatePreviewRequest(params);
+        if (!restart) {
+            res = mStreamingProcessor->updatePreviewRequest(params);
             if (res != OK) {
-                ALOGE("%s: Camera %d: Unable to create preview request: %s (%d)",
-                        __FUNCTION__, mCameraId, strerror(-res), res);
+                ALOGE("%s: Camera %d: Can't set up preview request: "
+                        "%s (%d)", __FUNCTION__, mCameraId,
+                        strerror(-res), res);
                 return res;
             }
         }
-        request = &mPreviewRequest;
+        res = mStreamingProcessor->startStream(StreamingProcessor::PREVIEW,
+                outputStreams);
     } else {
         // With recording hint set, we're going to be operating under the
         // assumption that the user will record video. To optimize recording
         // startup time, create the necessary output streams for recording and
         // video snapshot now if they don't already exist.
-        if (mRecordingRequest.entryCount() == 0) {
-            res = updateRecordingRequest(params);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Unable to create recording preview "
-                        "request: %s (%d)",
-                        __FUNCTION__, mCameraId, strerror(-res), res);
-                return res;
-            }
-        }
-        request = &mRecordingRequest;
-
-        // TODO: Re-enable recording stream creation/update here once issues are
-        // resolved
-
         res = mJpegProcessor->updateStream(params);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't pre-configure still image "
@@ -705,43 +662,26 @@
                     __FUNCTION__, mCameraId, strerror(-res), res);
             return res;
         }
+
+        if (!restart) {
+            res = mStreamingProcessor->updateRecordingRequest(params);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Can't set up preview request with "
+                        "record hint: %s (%d)", __FUNCTION__, mCameraId,
+                        strerror(-res), res);
+                return res;
+            }
+        }
+        res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
+                outputStreams);
     }
-
-    Vector<uint8_t> outputStreams;
-    outputStreams.push(getPreviewStreamId());
-
-    if (callbacksEnabled) {
-        outputStreams.push(getCallbackStreamId());
-    }
-    if (params.zslMode && !params.recordingHint) {
-        outputStreams.push(getZslStreamId());
-    }
-
-    res = request->update(
-        ANDROID_REQUEST_OUTPUT_STREAMS,
-        outputStreams);
-
     if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to set up preview request: %s (%d)",
-                __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
-    }
-    res = request->sort();
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Error sorting preview request: %s (%d)",
+        ALOGE("%s: Camera %d: Unable to start streaming preview: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
 
-    res = mDevice->setStreamingRequest(*request);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to set preview request to start preview: "
-                "%s (%d)",
-                __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
-    }
     params.state = Parameters::PREVIEW;
-
     return OK;
 }
 
@@ -776,8 +716,7 @@
         case Parameters::RECORD:
             // no break - identical to preview
         case Parameters::PREVIEW:
-            mDevice->clearStreamingRequest();
-            mDevice->waitUntilDrained();
+            mStreamingProcessor->stopStream();
             // no break
         case Parameters::WAITING_FOR_PREVIEW_WINDOW: {
             SharedParameters::Lock l(mParameters);
@@ -866,14 +805,24 @@
         return INVALID_OPERATION;
     }
 
-    mCameraService->playSound(CameraService::SOUND_RECORDING);
+    if (!restart) {
+        mCameraService->playSound(CameraService::SOUND_RECORDING);
+        mStreamingProcessor->updateRecordingRequest(params);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to update recording request: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
+    }
 
-    res = updateRecordingStream(params);
+    res = mStreamingProcessor->updateRecordingStream(params);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
+
+    Vector<uint8_t> outputStreams;
     bool callbacksEnabled = params.previewCallbackFlags &
         CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
     if (callbacksEnabled) {
@@ -883,54 +832,19 @@
                     __FUNCTION__, mCameraId, strerror(-res), res);
             return res;
         }
+        outputStreams.push(getCallbackStreamId());
     }
+    outputStreams.push(getPreviewStreamId());
+    outputStreams.push(getRecordingStreamId());
 
-    if (mRecordingRequest.entryCount() == 0) {
-        res = updateRecordingRequest(params);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to create recording request: %s (%d)",
-                    __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-    }
-
-    if (callbacksEnabled) {
-        uint8_t outputStreams[3] ={
-            getPreviewStreamId(),
-            getRecordingStreamId(),
-            getCallbackStreamId()
-        };
-        res = mRecordingRequest.update(
-                ANDROID_REQUEST_OUTPUT_STREAMS,
-                outputStreams, 3);
-    } else {
-        uint8_t outputStreams[2] = {
-            getPreviewStreamId(),
-            getRecordingStreamId()
-        };
-        res = mRecordingRequest.update(
-                ANDROID_REQUEST_OUTPUT_STREAMS,
-                outputStreams, 2);
-    }
+    res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
+            outputStreams);
     if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to set up recording request: %s (%d)",
-                __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
-    }
-    res = mRecordingRequest.sort();
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Error sorting recording request: %s (%d)",
+        ALOGE("%s: Camera %d: Unable to start recording stream: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
 
-    res = mDevice->setStreamingRequest(mRecordingRequest);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to set recording request to start "
-                "recording: %s (%d)", __FUNCTION__, mCameraId,
-                strerror(-res), res);
-        return res;
-    }
     if (params.state < Parameters::RECORD) {
         params.state = Parameters::RECORD;
     }
@@ -991,60 +905,9 @@
 void Camera2Client::releaseRecordingFrame(const sp<IMemory>& mem) {
     ATRACE_CALL();
     Mutex::Autolock icl(mICameraLock);
-    status_t res;
     if ( checkPid(__FUNCTION__) != OK) return;
 
-    SharedParameters::Lock l(mParameters);
-
-    // Make sure this is for the current heap
-    ssize_t offset;
-    size_t size;
-    sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
-    if (heap->getHeapID() != mRecordingHeap->mHeap->getHeapID()) {
-        ALOGW("%s: Camera %d: Mismatched heap ID, ignoring release "
-                "(got %x, expected %x)", __FUNCTION__, mCameraId,
-                heap->getHeapID(), mRecordingHeap->mHeap->getHeapID());
-        return;
-    }
-    uint8_t *data = (uint8_t*)heap->getBase() + offset;
-    uint32_t type = *(uint32_t*)data;
-    if (type != kMetadataBufferTypeGrallocSource) {
-        ALOGE("%s: Camera %d: Recording frame type invalid (got %x, expected %x)",
-                __FUNCTION__, mCameraId, type, kMetadataBufferTypeGrallocSource);
-        return;
-    }
-
-    // Release the buffer back to the recording queue
-
-    buffer_handle_t imgHandle = *(buffer_handle_t*)(data + 4);
-
-    size_t itemIndex;
-    for (itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
-        const BufferItemConsumer::BufferItem item = mRecordingBuffers[itemIndex];
-        if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT &&
-                item.mGraphicBuffer->handle == imgHandle) {
-            break;
-        }
-    }
-    if (itemIndex == mRecordingBuffers.size()) {
-        ALOGE("%s: Camera %d: Can't find buffer_handle_t %p in list of "
-                "outstanding buffers", __FUNCTION__, mCameraId, imgHandle);
-        return;
-    }
-
-    ALOGV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__, mCameraId,
-            imgHandle);
-
-    res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to free recording frame (buffer_handle_t: %p):"
-                "%s (%d)",
-                __FUNCTION__, mCameraId, imgHandle, strerror(-res), res);
-        return;
-    }
-    mRecordingBuffers.replaceAt(itemIndex);
-
-    mRecordingHeapFree++;
+    mStreamingProcessor->releaseRecordingFrame(mem);
 }
 
 status_t Camera2Client::autoFocus() {
@@ -1222,8 +1085,8 @@
     }
     SharedParameters::Lock l(mParameters);
     if (transform != l.mParameters.previewTransform &&
-            mPreviewStreamId != NO_STREAM) {
-        mDevice->setStreamTransform(mPreviewStreamId, transform);
+            getPreviewStreamId() != NO_STREAM) {
+        mDevice->setStreamTransform(getPreviewStreamId(), transform);
     }
     l.mParameters.previewTransform = transform;
     return OK;
@@ -1336,23 +1199,7 @@
         return INVALID_OPERATION;
     }
 
-    // 32 is the current upper limit on the video buffer count for BufferQueue
-    if (count > 32) {
-        ALOGE("%s: Camera %d: Error setting %d as video buffer count value",
-                __FUNCTION__, mCameraId, count);
-        return BAD_VALUE;
-    }
-
-    // Need to reallocate memory for heap
-    if (mRecordingHeapCount != count) {
-        if  (mRecordingHeap != 0) {
-            mRecordingHeap.clear();
-            mRecordingHeap = NULL;
-        }
-        mRecordingHeapCount = count;
-    }
-
-    return OK;
+    return mStreamingProcessor->setRecordingBufferCount(count);
 }
 
 /** Device-related methods */
@@ -1503,7 +1350,7 @@
 }
 
 int Camera2Client::getPreviewStreamId() const {
-    return mPreviewStreamId;
+    return mStreamingProcessor->getPreviewStreamId();
 }
 
 int Camera2Client::getCaptureStreamId() const {
@@ -1515,7 +1362,7 @@
 }
 
 int Camera2Client::getRecordingStreamId() const {
-    return mRecordingStreamId;
+    return mStreamingProcessor->getRecordingStreamId();
 }
 
 int Camera2Client::getZslStreamId() const {
@@ -1561,117 +1408,18 @@
 const int32_t Camera2Client::kRecordRequestId;
 const int32_t Camera2Client::kFirstCaptureRequestId;
 
-void Camera2Client::onRecordingFrameAvailable() {
-    ATRACE_CALL();
-    status_t res;
-    sp<Camera2Heap> recordingHeap;
-    size_t heapIdx = 0;
-    nsecs_t timestamp;
-    {
-        SharedParameters::Lock l(mParameters);
-
-        BufferItemConsumer::BufferItem imgBuffer;
-        res = mRecordingConsumer->acquireBuffer(&imgBuffer);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Error receiving recording buffer: %s (%d)",
-                    __FUNCTION__, mCameraId, strerror(-res), res);
-            return;
-        }
-        timestamp = imgBuffer.mTimestamp;
-
-        mRecordingFrameCount++;
-        ALOGV("OnRecordingFrame: Frame %d", mRecordingFrameCount);
-
-        // TODO: Signal errors here upstream
-        if (l.mParameters.state != Parameters::RECORD &&
-                l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
-            ALOGV("%s: Camera %d: Discarding recording image buffers received after "
-                    "recording done",
-                    __FUNCTION__, mCameraId);
-            mRecordingConsumer->releaseBuffer(imgBuffer);
-            return;
-        }
-
-        if (mRecordingHeap == 0) {
-            const size_t bufferSize = 4 + sizeof(buffer_handle_t);
-            ALOGV("%s: Camera %d: Creating recording heap with %d buffers of "
-                    "size %d bytes", __FUNCTION__, mCameraId,
-                    mRecordingHeapCount, bufferSize);
-
-            mRecordingHeap = new Camera2Heap(bufferSize, mRecordingHeapCount,
-                    "Camera2Client::RecordingHeap");
-            if (mRecordingHeap->mHeap->getSize() == 0) {
-                ALOGE("%s: Camera %d: Unable to allocate memory for recording",
-                        __FUNCTION__, mCameraId);
-                mRecordingConsumer->releaseBuffer(imgBuffer);
-                return;
-            }
-            for (size_t i = 0; i < mRecordingBuffers.size(); i++) {
-                if (mRecordingBuffers[i].mBuf !=
-                        BufferItemConsumer::INVALID_BUFFER_SLOT) {
-                    ALOGE("%s: Camera %d: Non-empty recording buffers list!",
-                            __FUNCTION__, mCameraId);
-                }
-            }
-            mRecordingBuffers.clear();
-            mRecordingBuffers.setCapacity(mRecordingHeapCount);
-            mRecordingBuffers.insertAt(0, mRecordingHeapCount);
-
-            mRecordingHeapHead = 0;
-            mRecordingHeapFree = mRecordingHeapCount;
-        }
-
-        if ( mRecordingHeapFree == 0) {
-            ALOGE("%s: Camera %d: No free recording buffers, dropping frame",
-                    __FUNCTION__, mCameraId);
-            mRecordingConsumer->releaseBuffer(imgBuffer);
-            return;
-        }
-
-        heapIdx = mRecordingHeapHead;
-        mRecordingHeapHead = (mRecordingHeapHead + 1) % mRecordingHeapCount;
-        mRecordingHeapFree--;
-
-        ALOGV("%s: Camera %d: Timestamp %lld",
-                __FUNCTION__, mCameraId, timestamp);
-
-        ssize_t offset;
-        size_t size;
-        sp<IMemoryHeap> heap =
-                mRecordingHeap->mBuffers[heapIdx]->getMemory(&offset,
-                        &size);
-
-        uint8_t *data = (uint8_t*)heap->getBase() + offset;
-        uint32_t type = kMetadataBufferTypeGrallocSource;
-        *((uint32_t*)data) = type;
-        *((buffer_handle_t*)(data + 4)) = imgBuffer.mGraphicBuffer->handle;
-        ALOGV("%s: Camera %d: Sending out buffer_handle_t %p",
-                __FUNCTION__, mCameraId, imgBuffer.mGraphicBuffer->handle);
-        mRecordingBuffers.replaceAt(imgBuffer, heapIdx);
-        recordingHeap = mRecordingHeap;
-    }
-
-    // Call outside locked parameters to allow re-entrancy from notification
-    SharedCameraClient::Lock l(mSharedCameraClient);
-    if (l.mCameraClient != 0) {
-        l.mCameraClient->dataCallbackTimestamp(timestamp,
-                CAMERA_MSG_VIDEO_FRAME,
-                recordingHeap->mBuffers[heapIdx]);
-    }
-}
-
 /** Utility methods */
 
 status_t Camera2Client::updateRequests(Parameters &params) {
     status_t res;
 
-    res = updatePreviewRequest(params);
+    res = mStreamingProcessor->updatePreviewRequest(params);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to update preview request: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
-    res = updateRecordingRequest(params);
+    res = mStreamingProcessor->updateRecordingRequest(params);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to update recording request: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
@@ -1687,7 +1435,7 @@
         }
     } else if (params.state == Parameters::RECORD ||
             params.state == Parameters::VIDEO_SNAPSHOT) {
-        res = mDevice->setStreamingRequest(mRecordingRequest);
+        res = startRecordingL(params, true);
         if (res != OK) {
             ALOGE("%s: Camera %d: Error streaming new record request: %s (%d)",
                     __FUNCTION__, mCameraId, strerror(-res), res);
@@ -1697,172 +1445,6 @@
     return res;
 }
 
-status_t Camera2Client::updatePreviewStream(const Parameters &params) {
-    ATRACE_CALL();
-    status_t res;
-
-    if (mPreviewStreamId != NO_STREAM) {
-        // Check if stream parameters have to change
-        uint32_t currentWidth, currentHeight;
-        res = mDevice->getStreamInfo(mPreviewStreamId,
-                &currentWidth, &currentHeight, 0);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Error querying preview stream info: "
-                    "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-        if (currentWidth != (uint32_t)params.previewWidth ||
-                currentHeight != (uint32_t)params.previewHeight) {
-            ALOGV("%s: Camera %d: Preview size switch: %d x %d -> %d x %d",
-                    __FUNCTION__, mCameraId, currentWidth, currentHeight,
-                    params.previewWidth, params.previewHeight);
-            res = mDevice->waitUntilDrained();
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Error waiting for preview to drain: "
-                        "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-                return res;
-            }
-            res = mDevice->deleteStream(mPreviewStreamId);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Unable to delete old output stream "
-                        "for preview: %s (%d)", __FUNCTION__, mCameraId,
-                        strerror(-res), res);
-                return res;
-            }
-            mPreviewStreamId = NO_STREAM;
-        }
-    }
-
-    if (mPreviewStreamId == NO_STREAM) {
-        res = mDevice->createStream(mPreviewWindow,
-                params.previewWidth, params.previewHeight,
-                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0,
-                &mPreviewStreamId);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
-                    __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-    }
-
-    res = mDevice->setStreamTransform(mPreviewStreamId,
-            params.previewTransform);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to set preview stream transform: "
-                "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
-    }
-
-    return OK;
-}
-
-status_t Camera2Client::updatePreviewRequest(const Parameters &params) {
-    ATRACE_CALL();
-    status_t res;
-    if (mPreviewRequest.entryCount() == 0) {
-        res = mDevice->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
-                &mPreviewRequest);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to create default preview request: "
-                    "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-    }
-
-    res = params.updateRequest(&mPreviewRequest);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to update common entries of preview "
-                "request: %s (%d)", __FUNCTION__, mCameraId,
-                strerror(-res), res);
-        return res;
-    }
-
-    res = mPreviewRequest.update(ANDROID_REQUEST_ID,
-            &kPreviewRequestId, 1);
-
-    return OK;
-}
-
-status_t Camera2Client::updateRecordingRequest(const Parameters &params) {
-    ATRACE_CALL();
-    status_t res;
-    if (mRecordingRequest.entryCount() == 0) {
-        res = mDevice->createDefaultRequest(CAMERA2_TEMPLATE_VIDEO_RECORD,
-                &mRecordingRequest);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to create default recording request:"
-                    " %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-    }
-
-    res = params.updateRequest(&mRecordingRequest);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to update common entries of recording "
-                "request: %s (%d)", __FUNCTION__, mCameraId,
-                strerror(-res), res);
-        return res;
-    }
-
-    return OK;
-}
-
-status_t Camera2Client::updateRecordingStream(const Parameters &params) {
-    status_t res;
-
-    if (mRecordingConsumer == 0) {
-        // Create CPU buffer queue endpoint. We need one more buffer here so that we can
-        // always acquire and free a buffer when the heap is full; otherwise the consumer
-        // will have buffers in flight we'll never clear out.
-        mRecordingConsumer = new BufferItemConsumer(
-                GRALLOC_USAGE_HW_VIDEO_ENCODER,
-                mRecordingHeapCount + 1,
-                true);
-        mRecordingConsumer->setFrameAvailableListener(new RecordingWaiter(this));
-        mRecordingConsumer->setName(String8("Camera2Client::RecordingConsumer"));
-        mRecordingWindow = new SurfaceTextureClient(
-            mRecordingConsumer->getProducerInterface());
-        // Allocate memory later, since we don't know buffer size until receipt
-    }
-
-    if (mRecordingStreamId != NO_STREAM) {
-        // Check if stream parameters have to change
-        uint32_t currentWidth, currentHeight;
-        res = mDevice->getStreamInfo(mRecordingStreamId,
-                &currentWidth, &currentHeight, 0);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Error querying recording output stream info: "
-                    "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-        if (currentWidth != (uint32_t)params.videoWidth ||
-                currentHeight != (uint32_t)params.videoHeight) {
-            // TODO: Should wait to be sure previous recording has finished
-            res = mDevice->deleteStream(mRecordingStreamId);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Unable to delete old output stream "
-                        "for recording: %s (%d)", __FUNCTION__, mCameraId,
-                        strerror(-res), res);
-                return res;
-            }
-            mRecordingStreamId = NO_STREAM;
-        }
-    }
-
-    if (mRecordingStreamId == NO_STREAM) {
-        mRecordingFrameCount = 0;
-        res = mDevice->createStream(mRecordingWindow,
-                params.videoWidth, params.videoHeight,
-                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0, &mRecordingStreamId);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Can't create output stream for recording: "
-                    "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-    }
-
-    return OK;
-}
 
 size_t Camera2Client::calculateBufferSize(int width, int height,
         int format, int stride) {
diff --git a/services/camera/libcameraservice/Camera2Client.h b/services/camera/libcameraservice/Camera2Client.h
index 1eb024a..3a9d307 100644
--- a/services/camera/libcameraservice/Camera2Client.h
+++ b/services/camera/libcameraservice/Camera2Client.h
@@ -21,17 +21,15 @@
 #include "CameraService.h"
 #include "camera2/Parameters.h"
 #include "camera2/FrameProcessor.h"
+#include "camera2/StreamingProcessor.h"
 #include "camera2/JpegProcessor.h"
 #include "camera2/ZslProcessor.h"
 #include "camera2/CaptureSequencer.h"
 #include "camera2/CallbackProcessor.h"
-#include <binder/MemoryBase.h>
-#include <binder/MemoryHeapBase.h>
-#include <gui/CpuConsumer.h>
-#include <gui/BufferItemConsumer.h>
 
 namespace android {
 
+class IMemory;
 /**
  * Implements the android.hardware.camera API on top of
  * camera device HAL version 2.
@@ -184,15 +182,10 @@
 
     sp<camera2::FrameProcessor> mFrameProcessor;
 
-    /* Preview related members */
+    /* Preview/Recording related members */
 
-    int mPreviewStreamId;
-    CameraMetadata mPreviewRequest;
     sp<IBinder> mPreviewSurface;
-    sp<ANativeWindow> mPreviewWindow;
-
-    status_t updatePreviewRequest(const Parameters &params);
-    status_t updatePreviewStream(const Parameters &params);
+    sp<camera2::StreamingProcessor> mStreamingProcessor;
 
     /** Preview callback related members */
 
@@ -204,35 +197,6 @@
     sp<camera2::JpegProcessor> mJpegProcessor;
     sp<camera2::ZslProcessor> mZslProcessor;
 
-    /* Recording related members */
-
-    int mRecordingStreamId;
-    int mRecordingFrameCount;
-    sp<BufferItemConsumer>    mRecordingConsumer;
-    sp<ANativeWindow>  mRecordingWindow;
-    // Simple listener that forwards frame available notifications from
-    // a CPU consumer to the recording notification
-    class RecordingWaiter: public BufferItemConsumer::FrameAvailableListener {
-      public:
-        RecordingWaiter(Camera2Client *parent) : mParent(parent) {}
-        void onFrameAvailable() { mParent->onRecordingFrameAvailable(); }
-      private:
-        Camera2Client *mParent;
-    };
-    sp<RecordingWaiter>  mRecordingWaiter;
-    CameraMetadata mRecordingRequest;
-    sp<camera2::Camera2Heap> mRecordingHeap;
-
-    static const size_t kDefaultRecordingHeapCount = 8;
-    size_t mRecordingHeapCount;
-    Vector<BufferItemConsumer::BufferItem> mRecordingBuffers;
-    size_t mRecordingHeapHead, mRecordingHeapFree;
-    // Handle new recording image buffers
-    void onRecordingFrameAvailable();
-
-    status_t updateRecordingRequest(const Parameters &params);
-    status_t updateRecordingStream(const Parameters &params);
-
     /** Notification-related members */
 
     bool mAfInMotion;
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.cpp b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
index a40ddcc..7b368fa 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
@@ -20,6 +20,8 @@
 
 #include <netinet/in.h>
 
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.h b/services/camera/libcameraservice/camera2/JpegProcessor.h
index 5464519..836bd02 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.h
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.h
@@ -25,11 +25,11 @@
 #include <gui/CpuConsumer.h>
 #include "Parameters.h"
 #include "CameraMetadata.h"
-#include "Camera2Heap.h"
 
 namespace android {
 
 class Camera2Client;
+class MemoryHeapBase;
 
 namespace camera2 {
 
diff --git a/services/camera/libcameraservice/camera2/Parameters.cpp b/services/camera/libcameraservice/camera2/Parameters.cpp
index 6383434..8ae390d 100644
--- a/services/camera/libcameraservice/camera2/Parameters.cpp
+++ b/services/camera/libcameraservice/camera2/Parameters.cpp
@@ -1331,10 +1331,6 @@
         ALOGE("%s: Video stabilization not supported", __FUNCTION__);
     }
 
-    // LIGHTFX
-    validatedParams.lightFx = lightFxStringToEnum(
-        newParams.get(CameraParameters::KEY_LIGHTFX));
-
     /** Update internal parameters */
 
     validatedParams.paramsFlattened = params;
@@ -1746,18 +1742,6 @@
         Parameters::FOCUS_MODE_INVALID;
 }
 
-Parameters::Parameters::lightFxMode_t Parameters::lightFxStringToEnum(
-        const char *lightFxMode) {
-    return
-        !lightFxMode ?
-            Parameters::LIGHTFX_NONE :
-        !strcmp(lightFxMode, CameraParameters::LIGHTFX_LOWLIGHT) ?
-            Parameters::LIGHTFX_LOWLIGHT :
-        !strcmp(lightFxMode, CameraParameters::LIGHTFX_HDR) ?
-            Parameters::LIGHTFX_HDR :
-        Parameters::LIGHTFX_NONE;
-}
-
 status_t Parameters::parseAreas(const char *areasCStr,
         Vector<Parameters::Area> *areas) {
     static const size_t NUM_FIELDS = 5;
diff --git a/services/camera/libcameraservice/camera2/Parameters.h b/services/camera/libcameraservice/camera2/Parameters.h
index af23a4e..f830e21 100644
--- a/services/camera/libcameraservice/camera2/Parameters.h
+++ b/services/camera/libcameraservice/camera2/Parameters.h
@@ -213,7 +213,6 @@
     static int sceneModeStringToEnum(const char *sceneMode);
     static flashMode_t flashModeStringToEnum(const char *flashMode);
     static focusMode_t focusModeStringToEnum(const char *focusMode);
-    static lightFxMode_t lightFxStringToEnum(const char *lightFxMode);
     static status_t parseAreas(const char *areasCStr,
             Vector<Area> *areas);
     static status_t validateAreas(const Vector<Area> &areas,
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
new file mode 100644
index 0000000..140138d
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
@@ -0,0 +1,604 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-StreamingProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <gui/SurfaceTextureClient.h>
+#include <media/hardware/MetadataBufferType.h>
+
+#include "StreamingProcessor.h"
+#include "Camera2Heap.h"
+#include "../Camera2Client.h"
+#include "../Camera2Device.h"
+
+namespace android {
+namespace camera2 {
+
+StreamingProcessor::StreamingProcessor(wp<Camera2Client> client):
+        mClient(client),
+        mPreviewStreamId(NO_STREAM),
+        mRecordingStreamId(NO_STREAM),
+        mRecordingHeapCount(kDefaultRecordingHeapCount)
+{
+
+}
+
+StreamingProcessor::~StreamingProcessor() {
+    deletePreviewStream();
+    deleteRecordingStream();
+}
+
+status_t StreamingProcessor::setPreviewWindow(sp<ANativeWindow> window) {
+    ATRACE_CALL();
+    status_t res;
+
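+    // Tear down any existing preview stream before attaching the new window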
+    res = deletePreviewStream();
+    if (res != OK) return res;
+
+    Mutex::Autolock m(mMutex);
+
+    mPreviewWindow = window;
+
+    return OK;
+}
+
+bool StreamingProcessor::haveValidPreviewWindow() const {
+    Mutex::Autolock m(mMutex);
+    return mPreviewWindow != 0;
+}
+
+status_t StreamingProcessor::updatePreviewRequest(const Parameters &params) {
+    ATRACE_CALL();
+    status_t res;
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return INVALID_OPERATION;
+
+    Mutex::Autolock m(mMutex);
+    if (mPreviewRequest.entryCount() == 0) {
+        res = client->getCameraDevice()->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
+                &mPreviewRequest);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to create default preview request: "
+                    "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+            return res;
+        }
+    }
+
+    res = params.updateRequest(&mPreviewRequest);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update common entries of preview "
+                "request: %s (%d)", __FUNCTION__, client->getCameraId(),
+                strerror(-res), res);
+        return res;
+    }
+
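+    // Tag the request with the preview request ID so its results can be identified downstream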
+    res = mPreviewRequest.update(ANDROID_REQUEST_ID,
+            &Camera2Client::kPreviewRequestId, 1);
+
+    return OK;
+}
+
+status_t StreamingProcessor::updatePreviewStream(const Parameters &params) {
+    ATRACE_CALL();
+    Mutex::Autolock m(mMutex);
+
+    status_t res;
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return INVALID_OPERATION;
+    sp<Camera2Device> device = client->getCameraDevice();
+
+    if (mPreviewStreamId != NO_STREAM) {
+        // Check if stream parameters have to change
+        uint32_t currentWidth, currentHeight;
+        res = device->getStreamInfo(mPreviewStreamId,
+                &currentWidth, &currentHeight, 0);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error querying preview stream info: "
+                    "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+            return res;
+        }
+        if (currentWidth != (uint32_t)params.previewWidth ||
+                currentHeight != (uint32_t)params.previewHeight) {
+            ALOGV("%s: Camera %d: Preview size switch: %d x %d -> %d x %d",
+                    __FUNCTION__, client->getCameraId(), currentWidth, currentHeight,
+                    params.previewWidth, params.previewHeight);
+            res = device->waitUntilDrained();
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Error waiting for preview to drain: "
+                        "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+                return res;
+            }
+            res = device->deleteStream(mPreviewStreamId);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old output stream "
+                        "for preview: %s (%d)", __FUNCTION__, client->getCameraId(),
+                        strerror(-res), res);
+                return res;
+            }
+            mPreviewStreamId = NO_STREAM;
+        }
+    }
+
+    if (mPreviewStreamId == NO_STREAM) {
+        res = device->createStream(mPreviewWindow,
+                params.previewWidth, params.previewHeight,
+                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0,
+                &mPreviewStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
+                    __FUNCTION__, client->getCameraId(), strerror(-res), res);
+            return res;
+        }
+    }
+
+    res = device->setStreamTransform(mPreviewStreamId,
+            params.previewTransform);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set preview stream transform: "
+                "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+status_t StreamingProcessor::deletePreviewStream() {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock m(mMutex);
+
+    if (mPreviewStreamId != NO_STREAM) {
+        sp<Camera2Client> client = mClient.promote();
+        if (client == 0) return INVALID_OPERATION;
+        sp<Camera2Device> device = client->getCameraDevice();
+
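+        // Let the HAL finish with outstanding buffers before tearing down the stream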
+        res = device->waitUntilDrained();
+        if (res != OK) {
+            ALOGE("%s: Error waiting for preview to drain: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+        res = device->deleteStream(mPreviewStreamId);
+        if (res != OK) {
+            ALOGE("%s: Unable to delete old preview stream: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+        mPreviewStreamId = NO_STREAM;
+    }
+    return OK;
+}
+
+int StreamingProcessor::getPreviewStreamId() const {
+    Mutex::Autolock m(mMutex);
+    return mPreviewStreamId;
+}
+
+status_t StreamingProcessor::setRecordingBufferCount(size_t count) {
+    ATRACE_CALL();
+    // 32 is the current upper limit on the video buffer count for BufferQueue
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return INVALID_OPERATION;
+    if (count > 32) {
+        ALOGE("%s: Camera %d: Error setting %d as video buffer count value",
+                __FUNCTION__, client->getCameraId(), count);
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock m(mMutex);
+
+    // Need to reallocate memory for heap
+    if (mRecordingHeapCount != count) {
+        if (mRecordingHeap != 0) {
+            mRecordingHeap.clear();
+            mRecordingHeap = NULL;
+        }
+        mRecordingHeapCount = count;
+    }
+
+    return OK;
+}
+
+status_t StreamingProcessor::updateRecordingRequest(const Parameters &params) {
+    ATRACE_CALL();
+    status_t res;
+    Mutex::Autolock m(mMutex);
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return INVALID_OPERATION;
+
+    if (mRecordingRequest.entryCount() == 0) {
+        res = client->getCameraDevice()->createDefaultRequest(CAMERA2_TEMPLATE_VIDEO_RECORD,
+                &mRecordingRequest);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to create default recording request:"
+                    " %s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+            return res;
+        }
+    }
+
+    res = params.updateRequest(&mRecordingRequest);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update common entries of recording "
+                "request: %s (%d)", __FUNCTION__, client->getCameraId(),
+                strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+status_t StreamingProcessor::updateRecordingStream(const Parameters &params) {
+    ATRACE_CALL();
+    status_t res;
+    Mutex::Autolock m(mMutex);
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return INVALID_OPERATION;
+    sp<Camera2Device> device = client->getCameraDevice();
+
+    if (mRecordingConsumer == 0) {
+        // Create CPU buffer queue endpoint. We need one more buffer here so that we can
+        // always acquire and free a buffer when the heap is full; otherwise the consumer
+        // will have buffers in flight we'll never clear out.
+        mRecordingConsumer = new BufferItemConsumer(
+                GRALLOC_USAGE_HW_VIDEO_ENCODER,
+                mRecordingHeapCount + 1,
+                true);
+        mRecordingConsumer->setFrameAvailableListener(this);
+        mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
+        mRecordingWindow = new SurfaceTextureClient(
+            mRecordingConsumer->getProducerInterface());
+        // Allocate memory later, since we don't know buffer size until receipt
+    }
+
+    if (mRecordingStreamId != NO_STREAM) {
+        // Check if stream parameters have to change
+        uint32_t currentWidth, currentHeight;
+        res = device->getStreamInfo(mRecordingStreamId,
+                &currentWidth, &currentHeight, 0);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error querying recording output stream info: "
+                    "%s (%d)", __FUNCTION__, client->getCameraId(),
+                    strerror(-res), res);
+            return res;
+        }
+        if (currentWidth != (uint32_t)params.videoWidth ||
+                currentHeight != (uint32_t)params.videoHeight) {
+            // TODO: Should wait to be sure previous recording has finished
+            res = device->deleteStream(mRecordingStreamId);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old output stream "
+                        "for recording: %s (%d)", __FUNCTION__,
+                        client->getCameraId(), strerror(-res), res);
+                return res;
+            }
+            mRecordingStreamId = NO_STREAM;
+        }
+    }
+
+    if (mRecordingStreamId == NO_STREAM) {
+        mRecordingFrameCount = 0;
+        res = device->createStream(mRecordingWindow,
+                params.videoWidth, params.videoHeight,
+                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0, &mRecordingStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't create output stream for recording: "
+                    "%s (%d)", __FUNCTION__, client->getCameraId(),
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
+    return OK;
+}
+
+status_t StreamingProcessor::deleteRecordingStream() {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock m(mMutex);
+
+    if (mRecordingStreamId != NO_STREAM) {
+        sp<Camera2Client> client = mClient.promote();
+        if (client == 0) return INVALID_OPERATION;
+        sp<Camera2Device> device = client->getCameraDevice();
+
+        res = device->waitUntilDrained();
+        if (res != OK) {
+            ALOGE("%s: Error waiting for HAL to drain: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+        res = device->deleteStream(mRecordingStreamId);
+        if (res != OK) {
+            ALOGE("%s: Unable to delete recording stream: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+        mRecordingStreamId = NO_STREAM;
+    }
+    return OK;
+}
+
+int StreamingProcessor::getRecordingStreamId() const {
+    Mutex::Autolock m(mMutex);
+    return mRecordingStreamId;
+}
+
+status_t StreamingProcessor::startStream(StreamType type,
+        const Vector<uint8_t> &outputStreams) {
+    ATRACE_CALL();
+    status_t res;
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return INVALID_OPERATION;
+
+    Mutex::Autolock m(mMutex);
+
+    CameraMetadata &request = (type == PREVIEW) ?
+            mPreviewRequest : mRecordingRequest;
+
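+    // Attach the caller-supplied output stream IDs to the streaming request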
+    res = request.update(
+        ANDROID_REQUEST_OUTPUT_STREAMS,
+        outputStreams);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set up preview request: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return res;
+    }
+
+    res = request.sort();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Error sorting preview request: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return res;
+    }
+
+    res = client->getCameraDevice()->setStreamingRequest(request);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set preview request to start preview: "
+                "%s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+status_t StreamingProcessor::stopStream() {
+    ATRACE_CALL();
+    status_t res;
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return INVALID_OPERATION;
+    sp<Camera2Device> device = client->getCameraDevice();
+
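+    // Clear the repeating request, then wait for in-flight frames to drain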
+    res = device->clearStreamingRequest();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Can't clear stream request: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return res;
+    }
+    res = device->waitUntilDrained();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return res;
+    }
+    return OK;
+}
+
+void StreamingProcessor::onFrameAvailable() {
+    ATRACE_CALL();
+    status_t res;
+    sp<Camera2Heap> recordingHeap;
+    size_t heapIdx = 0;
+    nsecs_t timestamp;
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return;
+
+    {
+        Mutex::Autolock m(mMutex);
+        BufferItemConsumer::BufferItem imgBuffer;
+        res = mRecordingConsumer->acquireBuffer(&imgBuffer);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error receiving recording buffer: %s (%d)",
+                    __FUNCTION__, client->getCameraId(), strerror(-res), res);
+            return;
+        }
+        timestamp = imgBuffer.mTimestamp;
+
+        mRecordingFrameCount++;
+        ALOGV("OnRecordingFrame: Frame %d", mRecordingFrameCount);
+
+        {
+            SharedParameters::Lock l(client->getParameters());
+            // TODO: Signal errors here upstream
+            if (l.mParameters.state != Parameters::RECORD &&
+                    l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
+                ALOGV("%s: Camera %d: Discarding recording image buffers "
+                        "received after recording done", __FUNCTION__,
+                        client->getCameraId());
+                mRecordingConsumer->releaseBuffer(imgBuffer);
+                return;
+            }
+        }
+
+        if (mRecordingHeap == 0) {
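+            // Each heap slot packs a 4-byte metadata type tag
+            // (kMetadataBufferTypeGrallocSource) followed by the gralloc buffer handle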
+            const size_t bufferSize = 4 + sizeof(buffer_handle_t);
+            ALOGV("%s: Camera %d: Creating recording heap with %d buffers of "
+                    "size %d bytes", __FUNCTION__, client->getCameraId(),
+                    mRecordingHeapCount, bufferSize);
+
+            mRecordingHeap = new Camera2Heap(bufferSize, mRecordingHeapCount,
+                    "Camera2Client::RecordingHeap");
+            if (mRecordingHeap->mHeap->getSize() == 0) {
+                ALOGE("%s: Camera %d: Unable to allocate memory for recording",
+                        __FUNCTION__, client->getCameraId());
+                mRecordingConsumer->releaseBuffer(imgBuffer);
+                return;
+            }
+            for (size_t i = 0; i < mRecordingBuffers.size(); i++) {
+                if (mRecordingBuffers[i].mBuf !=
+                        BufferItemConsumer::INVALID_BUFFER_SLOT) {
+                    ALOGE("%s: Camera %d: Non-empty recording buffers list!",
+                            __FUNCTION__, client->getCameraId());
+                }
+            }
+            mRecordingBuffers.clear();
+            mRecordingBuffers.setCapacity(mRecordingHeapCount);
+            mRecordingBuffers.insertAt(0, mRecordingHeapCount);
+
+            mRecordingHeapHead = 0;
+            mRecordingHeapFree = mRecordingHeapCount;
+        }
+
+        if (mRecordingHeapFree == 0) {
+            ALOGE("%s: Camera %d: No free recording buffers, dropping frame",
+                    __FUNCTION__, client->getCameraId());
+            mRecordingConsumer->releaseBuffer(imgBuffer);
+            return;
+        }
+
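+        // Claim the next slot in the circular recording heap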
+        heapIdx = mRecordingHeapHead;
+        mRecordingHeapHead = (mRecordingHeapHead + 1) % mRecordingHeapCount;
+        mRecordingHeapFree--;
+
+        ALOGV("%s: Camera %d: Timestamp %lld",
+                __FUNCTION__, client->getCameraId(), timestamp);
+
+        ssize_t offset;
+        size_t size;
+        sp<IMemoryHeap> heap =
+                mRecordingHeap->mBuffers[heapIdx]->getMemory(&offset,
+                        &size);
+
+        uint8_t *data = (uint8_t*)heap->getBase() + offset;
+        uint32_t type = kMetadataBufferTypeGrallocSource;
+        *((uint32_t*)data) = type;
+        *((buffer_handle_t*)(data + 4)) = imgBuffer.mGraphicBuffer->handle;
+        ALOGV("%s: Camera %d: Sending out buffer_handle_t %p",
+                __FUNCTION__, client->getCameraId(),
+                imgBuffer.mGraphicBuffer->handle);
+        mRecordingBuffers.replaceAt(imgBuffer, heapIdx);
+        recordingHeap = mRecordingHeap;
+    }
+
+    // Call outside locked parameters to allow re-entrancy from notification
+    Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient);
+    if (l.mCameraClient != 0) {
+        l.mCameraClient->dataCallbackTimestamp(timestamp,
+                CAMERA_MSG_VIDEO_FRAME,
+                recordingHeap->mBuffers[heapIdx]);
+    }
+}
+
+void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) {
+    ATRACE_CALL();
+    status_t res;
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return;
+
+    Mutex::Autolock m(mMutex);
+    // Make sure this is for the current heap
+    ssize_t offset;
+    size_t size;
+    sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
+    if (heap->getHeapID() != mRecordingHeap->mHeap->getHeapID()) {
+        ALOGW("%s: Camera %d: Mismatched heap ID, ignoring release "
+                "(got %x, expected %x)", __FUNCTION__, client->getCameraId(),
+                heap->getHeapID(), mRecordingHeap->mHeap->getHeapID());
+        return;
+    }
+    uint8_t *data = (uint8_t*)heap->getBase() + offset;
+    uint32_t type = *(uint32_t*)data;
+    if (type != kMetadataBufferTypeGrallocSource) {
+        ALOGE("%s: Camera %d: Recording frame type invalid (got %x, expected %x)",
+                __FUNCTION__, client->getCameraId(), type,
+                kMetadataBufferTypeGrallocSource);
+        return;
+    }
+
+    // Release the buffer back to the recording queue
+
+    buffer_handle_t imgHandle = *(buffer_handle_t*)(data + 4);
+
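+    // Find the outstanding BufferItem whose gralloc handle matches this frame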
+    size_t itemIndex;
+    for (itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
+        const BufferItemConsumer::BufferItem item =
+                mRecordingBuffers[itemIndex];
+        if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT &&
+                item.mGraphicBuffer->handle == imgHandle) {
+            break;
+        }
+    }
+    if (itemIndex == mRecordingBuffers.size()) {
+        ALOGE("%s: Camera %d: Can't find buffer_handle_t %p in list of "
+                "outstanding buffers", __FUNCTION__, client->getCameraId(),
+                imgHandle);
+        return;
+    }
+
+    ALOGV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__,
+            client->getCameraId(), imgHandle);
+
+    res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to free recording frame "
+                "(buffer_handle_t: %p): %s (%d)", __FUNCTION__,
+                client->getCameraId(), imgHandle, strerror(-res), res);
+        return;
+    }
+    mRecordingBuffers.replaceAt(itemIndex);
+
+    mRecordingHeapFree++;
+}
+
+status_t StreamingProcessor::dump(int fd, const Vector<String16>& args) {
+    String8 result;
+
+    result.append("  Current requests:\n");
+    if (mPreviewRequest.entryCount() != 0) {
+        result.append("    Preview request:\n");
+        write(fd, result.string(), result.size());
+        mPreviewRequest.dump(fd, 2, 6);
+    } else {
+        result.append("    Preview request: undefined\n");
+        write(fd, result.string(), result.size());
+    }
+
+    if (mRecordingRequest.entryCount() != 0) {
+        result = "    Recording request:\n";
+        write(fd, result.string(), result.size());
+        mRecordingRequest.dump(fd, 2, 6);
+    } else {
+        result = "    Recording request: undefined\n";
+        write(fd, result.string(), result.size());
+    }
+
+    return OK;
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.h b/services/camera/libcameraservice/camera2/StreamingProcessor.h
new file mode 100644
index 0000000..ac58614
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_STREAMINGPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_STREAMINGPROCESSOR_H
+
+#include <utils/Mutex.h>
+#include <utils/String16.h>
+#include <gui/BufferItemConsumer.h>
+
+#include "Parameters.h"
+#include "CameraMetadata.h"
+
+namespace android {
+
+class Camera2Client;
+class IMemory;
+
+namespace camera2 {
+
+class Camera2Heap;
+
+/**
+ * Management and processing for preview and recording streams
+ */
+class StreamingProcessor: public BufferItemConsumer::FrameAvailableListener {
+  public:
+    StreamingProcessor(wp<Camera2Client> client);
+    ~StreamingProcessor();
+
+    status_t setPreviewWindow(sp<ANativeWindow> window);
+
+    bool haveValidPreviewWindow() const;
+
+    status_t updatePreviewRequest(const Parameters &params);
+    status_t updatePreviewStream(const Parameters &params);
+    status_t deletePreviewStream();
+    int getPreviewStreamId() const;
+
+    status_t setRecordingBufferCount(size_t count);
+    status_t updateRecordingRequest(const Parameters &params);
+    status_t updateRecordingStream(const Parameters &params);
+    status_t deleteRecordingStream();
+    int getRecordingStreamId() const;
+
+    enum StreamType {
+        PREVIEW,
+        RECORD
+    };
+    status_t startStream(StreamType type,
+            const Vector<uint8_t> &outputStreams);
+
+    status_t stopStream();
+
+    // Callback for new recording frames from HAL
+    virtual void onFrameAvailable();
+    // Callback from stagefright which returns used recording frames
+    void releaseRecordingFrame(const sp<IMemory>& mem);
+
+    status_t dump(int fd, const Vector<String16>& args);
+
+  private:
+    mutable Mutex mMutex;
+
+    enum {
+        NO_STREAM = -1
+    };
+
+    wp<Camera2Client> mClient;
+
+    // Preview-related members
+    int mPreviewStreamId;
+    CameraMetadata mPreviewRequest;
+    sp<ANativeWindow> mPreviewWindow;
+
+    // Recording-related members
+    int mRecordingStreamId;
+    int mRecordingFrameCount;
+    sp<BufferItemConsumer> mRecordingConsumer;
+    sp<ANativeWindow>  mRecordingWindow;
+    CameraMetadata mRecordingRequest;
+    sp<camera2::Camera2Heap> mRecordingHeap;
+
+    static const size_t kDefaultRecordingHeapCount = 8;
+    size_t mRecordingHeapCount;
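+    // Outstanding recording buffers and circular-heap bookkeeping (head index, free slots)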
+    Vector<BufferItemConsumer::BufferItem> mRecordingBuffers;
+    size_t mRecordingHeapHead, mRecordingHeapFree;
+
+};
+
+
+}; // namespace camera2
+}; // namespace android
+
+#endif