Merge "Fix headroom management in equalizer" into jb-mr1-dev
diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp
index 872512a..a657fe3 100644
--- a/camera/CameraParameters.cpp
+++ b/camera/CameraParameters.cpp
@@ -90,6 +90,7 @@
 const char CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED[] = "video-snapshot-supported";
 const char CameraParameters::KEY_VIDEO_STABILIZATION[] = "video-stabilization";
 const char CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED[] = "video-stabilization-supported";
+const char CameraParameters::KEY_LIGHTFX[] = "light-fx";
 
 const char CameraParameters::TRUE[] = "true";
 const char CameraParameters::FALSE[] = "false";
@@ -166,6 +167,10 @@
 const char CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO[] = "continuous-video";
 const char CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE[] = "continuous-picture";
 
+// Values for light fx settings
+const char CameraParameters::LIGHTFX_LOWLIGHT[] = "low-light";
+const char CameraParameters::LIGHTFX_HDR[] = "high-dynamic-range";
+
 CameraParameters::CameraParameters()
                 : mMap()
 {
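
A minimal usage sketch for the new key (illustrative only; set() is the
existing CameraParameters string-map setter, and the camera HAL must actually
support the requested mode):

    CameraParameters params;
    params.set(CameraParameters::KEY_LIGHTFX, CameraParameters::LIGHTFX_LOWLIGHT);
    // or, before an HDR capture:
    params.set(CameraParameters::KEY_LIGHTFX, CameraParameters::LIGHTFX_HDR);
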
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index 4d5aa36..8668958 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -298,7 +298,7 @@
     // Example value: "42.5". Read only.
     static const char KEY_VERTICAL_VIEW_ANGLE[];
     // Exposure compensation index. 0 means exposure is not adjusted.
-    // Example value: "0" or "5". Read/write.
+    // Example value: "-5" or "5". Read/write.
     static const char KEY_EXPOSURE_COMPENSATION[];
     // The maximum exposure compensation index (>=0).
     // Example value: "6". Read only.
@@ -307,7 +307,7 @@
     // Example value: "-6". Read only.
     static const char KEY_MIN_EXPOSURE_COMPENSATION[];
     // The exposure compensation step. Exposure compensation index multiply by
-    // step eqals to EV. Ex: if exposure compensation index is 6 and step is
+    // step equals EV. Ex: if exposure compensation index is -6 and step is
     // 0.3333, EV is -2.
     // Example value: "0.333333333" or "0.5". Read only.
     static const char KEY_EXPOSURE_COMPENSATION_STEP[];
@@ -525,6 +525,10 @@
     // stream and record stabilized videos.
     static const char KEY_VIDEO_STABILIZATION_SUPPORTED[];
 
+    // Supported modes for special effects with light.
+    // Example values: "low-light,high-dynamic-range".
+    static const char KEY_LIGHTFX[];
+
     // Value for KEY_ZOOM_SUPPORTED or KEY_SMOOTH_ZOOM_SUPPORTED.
     static const char TRUE[];
     static const char FALSE[];
@@ -660,6 +664,12 @@
     // other modes.
     static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
 
+    // Values for light special effects
+    // Low-light enhancement mode
+    static const char LIGHTFX_LOWLIGHT[];
+    // High dynamic range mode
+    static const char LIGHTFX_HDR[];
+
 private:
     DefaultKeyedVector<String8,String8>    mMap;
 };
diff --git a/include/media/IRemoteDisplay.h b/include/media/IRemoteDisplay.h
index f39286e..a61704e 100644
--- a/include/media/IRemoteDisplay.h
+++ b/include/media/IRemoteDisplay.h
@@ -39,10 +39,8 @@
 public:
     DECLARE_META_INTERFACE(RemoteDisplay);
 
-    // Disconnects the remote display.
-    // The remote display should respond back to the IRemoteDisplayClient with an
-    // onDisplayDisconnected() event when the disconnection is complete.
-    virtual status_t disconnect() = 0;
+    // Disconnects the remote display and stops listening for new connections.
+    virtual status_t dispose() = 0;
 };
 
 
diff --git a/include/media/IRemoteDisplayClient.h b/include/media/IRemoteDisplayClient.h
index 38a0c9a..553ad36 100644
--- a/include/media/IRemoteDisplayClient.h
+++ b/include/media/IRemoteDisplayClient.h
@@ -40,9 +40,9 @@
 
     enum {
         // Error: An unknown / generic error occurred.
-        kErrorUnknown = 0,
+        kDisplayErrorUnknown = 1,
         // Error: The connection was dropped unexpectedly.
-        kErrorConnectionDropped = 1,
+        kDisplayErrorConnectionDropped = 2,
     };
 
     // Indicates that the remote display has been connected successfully.
@@ -52,7 +52,8 @@
             uint32_t width, uint32_t height, uint32_t flags) = 0; // one-way
 
     // Indicates that the remote display has been disconnected normally.
-    // This method should only be called once the client has called 'disconnect()'.
+    // This method should only be called once the client has called 'dispose()'
+    // on the IRemoteDisplay.
     // It is currently an error for the display to disconnect for any other reason.
     virtual void onDisplayDisconnected() = 0; // one-way
 
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 518948c..b7bee3f 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -150,16 +150,16 @@
     virtual status_t    setParameter(int key, const Parcel &request) = 0;
     virtual status_t    getParameter(int key, Parcel *reply) = 0;
 
-    // Right now, only the AAX TX player supports this functionality.  For now,
-    // provide default implementations which indicate a lack of support for this
-    // functionality to make life easier for all of the other media player
-    // maintainers out there.
+    // Default no-op implementations of optional extensions.
     virtual status_t setRetransmitEndpoint(const struct sockaddr_in* endpoint) {
         return INVALID_OPERATION;
     }
     virtual status_t getRetransmitEndpoint(struct sockaddr_in* endpoint) {
         return INVALID_OPERATION;
     }
+    virtual status_t setNextPlayer(const sp<MediaPlayerBase>& next) {
+        return OK;
+    }
 
     // Invoke a generic method on the player by using opaque parcels
     // for the request and reply.
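
An illustrative sketch, not part of the patch, of an engine opting into the
new hook while every other player silently inherits the no-op default (the
remaining pure virtuals of MediaPlayerInterface are elided):

    // Hypothetical engine that supports gapless hand-off.
    struct ChainablePlayer : public MediaPlayerInterface {
        // ... usual MediaPlayerBase/MediaPlayerInterface overrides elided ...
        virtual status_t setNextPlayer(const sp<MediaPlayerBase> &next) {
            mNextPlayer = next;  // switch over when the current stream drains
            return OK;
        }
    private:
        sp<MediaPlayerBase> mNextPlayer;
    };
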
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index d87902e..8213af9 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -42,6 +42,8 @@
 struct AMessage;
 status_t convertMetaDataToMessage(
         const sp<MetaData> &meta, sp<AMessage> *format);
+void convertMessageToMetaData(
+        const sp<AMessage> &format, sp<MetaData> &meta);
 
 }  // namespace android
 
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 5709837..597866a 100755
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -517,6 +517,10 @@
     webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
     webrtc::EchoControlMobile::RoutingMode mode = webrtc::EchoControlMobile::kQuietEarpieceOrHeadset;
 
+    if (audio_is_input_device(device)) {
+        return 0;
+    }
+
     switch(device) {
     case AUDIO_DEVICE_OUT_EARPIECE:
         mode = webrtc::EchoControlMobile::kEarpiece;
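
The effect of the new guard, sketched (the enclosing setter's name is outside
this hunk, so aec_set_device below is a stand-in):

    // aec_set_device(effect, AUDIO_DEVICE_IN_BUILTIN_MIC);
    //     -> returns 0 immediately, routing mode left untouched
    // aec_set_device(effect, AUDIO_DEVICE_OUT_EARPIECE);
    //     -> falls through to the switch, mode = kEarpiece
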
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 5060525..8ea6306 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -177,7 +177,7 @@
 
     // validate framecount
     int minFrameCount = 0;
-    status_t status = getMinFrameCount(&minFrameCount, sampleRate, format, channelCount);
+    status_t status = getMinFrameCount(&minFrameCount, sampleRate, format, channelMask);
     if (status != NO_ERROR) {
         return status;
     }
diff --git a/media/libmedia/IRemoteDisplay.cpp b/media/libmedia/IRemoteDisplay.cpp
index 5d6ab34..da25a15 100644
--- a/media/libmedia/IRemoteDisplay.cpp
+++ b/media/libmedia/IRemoteDisplay.cpp
@@ -22,7 +22,7 @@
 namespace android {
 
 enum {
-    DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
+    DISPOSE = IBinder::FIRST_CALL_TRANSACTION,
 };
 
 class BpRemoteDisplay: public BpInterface<IRemoteDisplay>
@@ -33,11 +33,11 @@
     {
     }
 
-    status_t disconnect()
+    status_t dispose()
     {
         Parcel data, reply;
         data.writeInterfaceToken(IRemoteDisplay::getInterfaceDescriptor());
-        remote()->transact(DISCONNECT, data, &reply);
+        remote()->transact(DISPOSE, data, &reply);
         return reply.readInt32();
     }
 };
@@ -50,9 +50,9 @@
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
     switch (code) {
-        case DISCONNECT: {
+        case DISPOSE: {
             CHECK_INTERFACE(IRemoteDisplay, data, reply);
-            reply->writeInt32(disconnect());
+            reply->writeInt32(dispose());
             return NO_ERROR;
         }
         default:
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index f821cc3..3f69c11 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -246,6 +246,7 @@
                                                  ".midi",
                                                  ".smf",
                                                  ".xmf",
+                                                 ".mxmf",
                                                  ".imy",
                                                  ".rtttl",
                                                  ".rtx",
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 9005500..6b57c48 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -283,7 +283,7 @@
 
 sp<IRemoteDisplay> MediaPlayerService::listenForRemoteDisplay(
         const sp<IRemoteDisplayClient>& client, const String8& iface) {
-    return new RemoteDisplay(client, iface.string());;
+    return new RemoteDisplay(client, iface.string());
 }
 
 status_t MediaPlayerService::enableRemoteDisplay(const char *iface) {
@@ -299,7 +299,7 @@
     }
 
     if (mRemoteDisplay != NULL) {
-        mRemoteDisplay->disconnect();
+        mRemoteDisplay->dispose();
         mRemoteDisplay.clear();
     }
 
@@ -922,15 +922,22 @@
     Mutex::Autolock l(mLock);
     sp<Client> c = static_cast<Client*>(player.get());
     mNextClient = c;
-    if (mAudioOutput != NULL && c != NULL) {
-        mAudioOutput->setNextOutput(c->mAudioOutput);
-    } else {
-        ALOGE("no current audio output");
+
+    if (c != NULL) {
+        if (mAudioOutput != NULL) {
+            mAudioOutput->setNextOutput(c->mAudioOutput);
+        } else if ((mPlayer != NULL) && !mPlayer->hardwareOutput()) {
+            ALOGE("no current audio output");
+        }
+
+        if ((mPlayer != NULL) && (mNextClient->getPlayer() != NULL)) {
+            mPlayer->setNextPlayer(mNextClient->getPlayer());
+        }
     }
+
     return OK;
 }
 
-
 status_t MediaPlayerService::Client::seekTo(int msec)
 {
     ALOGV("[%d] seekTo(%d)", mConnId, msec);
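
For orientation, the gapless call flow this hunk wires up (the enclosing
method's signature lies outside the hunk, so "set-next entry point" below is
descriptive rather than a literal name):

    // app:     asks its MediaPlayer to chain player2 after player1   (binder)
    // service: the Client's set-next entry point stores mNextClient, then:
    //            mAudioOutput->setNextOutput(c->mAudioOutput);  // chain sinks
    //            mPlayer->setNextPlayer(c->getPlayer());        // engine pre-queues
    // engines without support inherit the new no-op default and return OK
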
diff --git a/media/libmediaplayerservice/RemoteDisplay.cpp b/media/libmediaplayerservice/RemoteDisplay.cpp
index 1cc605e..5542bb5 100644
--- a/media/libmediaplayerservice/RemoteDisplay.cpp
+++ b/media/libmediaplayerservice/RemoteDisplay.cpp
@@ -39,7 +39,7 @@
 RemoteDisplay::~RemoteDisplay() {
 }
 
-status_t RemoteDisplay::disconnect() {
+status_t RemoteDisplay::dispose() {
     mSource->stop();
 
     mLooper->stop();
diff --git a/media/libmediaplayerservice/RemoteDisplay.h b/media/libmediaplayerservice/RemoteDisplay.h
index 63c5286..0d87250 100644
--- a/media/libmediaplayerservice/RemoteDisplay.h
+++ b/media/libmediaplayerservice/RemoteDisplay.h
@@ -33,7 +33,7 @@
 struct RemoteDisplay : public BnRemoteDisplay {
     RemoteDisplay(const sp<IRemoteDisplayClient> &client, const char *iface);
 
-    virtual status_t disconnect();
+    virtual status_t dispose();
 
 protected:
     virtual ~RemoteDisplay();
diff --git a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp
index c80d13f..ffb3a65 100644
--- a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp
+++ b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp
@@ -93,6 +93,10 @@
         return total;
     }
 
+    virtual bool isSeekable() {
+        return false;
+    }
+
 private:
     sp<NuPlayer::NuPlayerStreamListener> mListener;
     off64_t mPosition;
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 1522e75..f40982e 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -19,6 +19,7 @@
         ESDS.cpp                          \
         FileSource.cpp                    \
         FLACExtractor.cpp                 \
+        FragmentedMP4Extractor.cpp        \
         HTTPBase.cpp                      \
         JPEGSource.cpp                    \
         MP3Extractor.cpp                  \
diff --git a/media/libstagefright/DRMExtractor.cpp b/media/libstagefright/DRMExtractor.cpp
index 524c3aa..63cb430 100644
--- a/media/libstagefright/DRMExtractor.cpp
+++ b/media/libstagefright/DRMExtractor.cpp
@@ -15,11 +15,6 @@
  */
 
 #include "include/DRMExtractor.h"
-#include "include/AMRExtractor.h"
-#include "include/MP3Extractor.h"
-#include "include/MPEG4Extractor.h"
-#include "include/WAVExtractor.h"
-#include "include/OggExtractor.h"
 
 #include <arpa/inet.h>
 #include <utils/String8.h>
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 1de808e..9d0eea2 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -20,17 +20,18 @@
 #include "include/chromium_http_stub.h"
 #endif
 
-#include "include/MP3Extractor.h"
-#include "include/MPEG4Extractor.h"
-#include "include/WAVExtractor.h"
-#include "include/OggExtractor.h"
-#include "include/MPEG2PSExtractor.h"
-#include "include/MPEG2TSExtractor.h"
-#include "include/NuCachedSource2.h"
-#include "include/HTTPBase.h"
+#include "include/AACExtractor.h"
 #include "include/DRMExtractor.h"
 #include "include/FLACExtractor.h"
-#include "include/AACExtractor.h"
+#include "include/FragmentedMP4Extractor.h"
+#include "include/HTTPBase.h"
+#include "include/MP3Extractor.h"
+#include "include/MPEG2PSExtractor.h"
+#include "include/MPEG2TSExtractor.h"
+#include "include/MPEG4Extractor.h"
+#include "include/NuCachedSource2.h"
+#include "include/OggExtractor.h"
+#include "include/WAVExtractor.h"
 #include "include/WVMExtractor.h"
 
 #include "matroska/MatroskaExtractor.h"
@@ -110,6 +111,7 @@
 // static
 void DataSource::RegisterDefaultSniffers() {
     RegisterSniffer(SniffMPEG4);
+    RegisterSniffer(SniffFragmentedMP4);
     RegisterSniffer(SniffMatroska);
     RegisterSniffer(SniffOgg);
     RegisterSniffer(SniffWAV);
diff --git a/media/libstagefright/FragmentedMP4Extractor.cpp b/media/libstagefright/FragmentedMP4Extractor.cpp
new file mode 100644
index 0000000..82712ef
--- /dev/null
+++ b/media/libstagefright/FragmentedMP4Extractor.cpp
@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FragmentedMP4Extractor"
+#include <utils/Log.h>
+
+#include "include/FragmentedMP4Extractor.h"
+#include "include/SampleTable.h"
+#include "include/ESDS.h"
+
+#include <arpa/inet.h>
+
+#include <ctype.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <cutils/properties.h> // for property_get
+
+#include <media/stagefright/foundation/ABitReader.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class FragmentedMPEG4Source : public MediaSource {
+public:
+    // Caller retains ownership of the Parser
+    FragmentedMPEG4Source(bool audio,
+                const sp<MetaData> &format,
+                const sp<FragmentedMP4Parser> &parser,
+                const sp<FragmentedMP4Extractor> &extractor);
+
+    virtual status_t start(MetaData *params = NULL);
+    virtual status_t stop();
+
+    virtual sp<MetaData> getFormat();
+
+    virtual status_t read(
+            MediaBuffer **buffer, const ReadOptions *options = NULL);
+
+protected:
+    virtual ~FragmentedMPEG4Source();
+
+private:
+    Mutex mLock;
+
+    sp<MetaData> mFormat;
+    sp<FragmentedMP4Parser> mParser;
+    sp<FragmentedMP4Extractor> mExtractor;
+    bool mIsAudioTrack;
+    uint32_t mCurrentSampleIndex;
+
+    bool mIsAVC;
+    size_t mNALLengthSize;
+
+    bool mStarted;
+
+    MediaBufferGroup *mGroup;
+
+    bool mWantsNALFragments;
+
+    uint8_t *mSrcBuffer;
+
+    FragmentedMPEG4Source(const FragmentedMPEG4Source &);
+    FragmentedMPEG4Source &operator=(const FragmentedMPEG4Source &);
+};
+
+
+FragmentedMP4Extractor::FragmentedMP4Extractor(const sp<DataSource> &source)
+    : mLooper(new ALooper),
+      mParser(new FragmentedMP4Parser()),
+      mDataSource(source),
+      mInitCheck(NO_INIT),
+      mFileMetaData(new MetaData) {
+    ALOGV("FragmentedMP4Extractor");
+    mLooper->registerHandler(mParser);
+    mLooper->start(false /* runOnCallingThread */);
+    mParser->start(mDataSource);
+
+    bool hasVideo = mParser->getFormat(false /* audio */, true /* synchronous */) != NULL;
+    bool hasAudio = mParser->getFormat(true /* audio */, true /* synchronous */) != NULL;
+
+    if (hasVideo) {
+        mFileMetaData->setCString(
+                kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4);
+    } else if (hasAudio) {
+        mFileMetaData->setCString(kKeyMIMEType, "audio/mp4");
+    } else {
+        ALOGE("no audio and no video, no idea what file type this is");
+    }
+    // tracks are numbered such that video track is first, audio track is second
+    if (hasAudio && hasVideo) {
+        mTrackCount = 2;
+        mAudioTrackIndex = 1;
+    } else if (hasAudio) {
+        mTrackCount = 1;
+        mAudioTrackIndex = 0;
+    } else if (hasVideo) {
+        mTrackCount = 1;
+        mAudioTrackIndex = -1;
+    } else {
+        mTrackCount = 0;
+        mAudioTrackIndex = -1;
+    }
+
+    ALOGV("number of tracks: %zu", countTracks());
+}
+
+FragmentedMP4Extractor::~FragmentedMP4Extractor() {
+    ALOGV("~FragmentedMP4Extractor");
+    mLooper->stop();
+}
+
+uint32_t FragmentedMP4Extractor::flags() const {
+    return CAN_PAUSE |
+            (mParser->isSeekable() ? (CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK) : 0);
+}
+
+sp<MetaData> FragmentedMP4Extractor::getMetaData() {
+    return mFileMetaData;
+}
+
+size_t FragmentedMP4Extractor::countTracks() {
+    return mTrackCount;
+}
+
+
+sp<MetaData> FragmentedMP4Extractor::getTrackMetaData(
+        size_t index, uint32_t flags) {
+    if (index >= countTracks()) {
+        return NULL;
+    }
+
+    sp<AMessage> msg = mParser->getFormat(index == mAudioTrackIndex, true /* synchronous */);
+
+    if (msg == NULL) {
+        ALOGV("got null format for track %zu", index);
+        return NULL;
+    }
+
+    sp<MetaData> meta = new MetaData();
+    convertMessageToMetaData(msg, meta);
+    return meta;
+}
+
+static void MakeFourCCString(uint32_t x, char *s) {
+    s[0] = x >> 24;
+    s[1] = (x >> 16) & 0xff;
+    s[2] = (x >> 8) & 0xff;
+    s[3] = x & 0xff;
+    s[4] = '\0';
+}
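+
+// For example (illustrative): MakeFourCCString(FOURCC('m', 'o', 'o', 'f'), s)
+// leaves "moof" in s; the sniffing code below uses this to log box types.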
+
+sp<MediaSource> FragmentedMP4Extractor::getTrack(size_t index) {
+    if (index >= countTracks()) {
+        return NULL;
+    }
+    return new FragmentedMPEG4Source(index == mAudioTrackIndex, getTrackMetaData(index, 0), mParser, this);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+FragmentedMPEG4Source::FragmentedMPEG4Source(
+        bool audio,
+        const sp<MetaData> &format,
+        const sp<FragmentedMP4Parser> &parser,
+        const sp<FragmentedMP4Extractor> &extractor)
+    : mFormat(format),
+      mParser(parser),
+      mExtractor(extractor),
+      mIsAudioTrack(audio),
+      mCurrentSampleIndex(0),
+      mStarted(false),
+      mGroup(NULL),
+      mWantsNALFragments(false),
+      mSrcBuffer(NULL) {
+}
+
+FragmentedMPEG4Source::~FragmentedMPEG4Source() {
+    if (mStarted) {
+        stop();
+    }
+}
+
+status_t FragmentedMPEG4Source::start(MetaData *params) {
+    Mutex::Autolock autoLock(mLock);
+
+    CHECK(!mStarted);
+
+    int32_t val;
+    if (params && params->findInt32(kKeyWantsNALFragments, &val)
+        && val != 0) {
+        mWantsNALFragments = true;
+    } else {
+        mWantsNALFragments = false;
+    }
+    ALOGV("caller wants NAL fragments: %s", mWantsNALFragments ? "yes" : "no");
+
+    mGroup = new MediaBufferGroup;
+
+    int32_t max_size = 65536;
+    // XXX CHECK(mFormat->findInt32(kKeyMaxInputSize, &max_size));
+
+    mGroup->add_buffer(new MediaBuffer(max_size));
+
+    mSrcBuffer = new uint8_t[max_size];
+
+    mStarted = true;
+
+    return OK;
+}
+
+status_t FragmentedMPEG4Source::stop() {
+    Mutex::Autolock autoLock(mLock);
+
+    CHECK(mStarted);
+
+    delete[] mSrcBuffer;
+    mSrcBuffer = NULL;
+
+    delete mGroup;
+    mGroup = NULL;
+
+    mStarted = false;
+    mCurrentSampleIndex = 0;
+
+    return OK;
+}
+
+sp<MetaData> FragmentedMPEG4Source::getFormat() {
+    Mutex::Autolock autoLock(mLock);
+
+    return mFormat;
+}
+
+
+status_t FragmentedMPEG4Source::read(
+        MediaBuffer **out, const ReadOptions *options) {
+    int64_t seekTimeUs;
+    ReadOptions::SeekMode mode;
+    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+        mParser->seekTo(mIsAudioTrack, seekTimeUs);
+    }
+    MediaBuffer *buffer = NULL;
+    mGroup->acquire_buffer(&buffer);
+    sp<ABuffer> parseBuffer;
+
+    status_t ret = mParser->dequeueAccessUnit(mIsAudioTrack, &parseBuffer, true /* synchronous */);
+    if (ret != OK) {
+        buffer->release();
+        ALOGV("returning %d", ret);
+        return ret;
+    }
+    sp<AMessage> meta = parseBuffer->meta();
+    int64_t timeUs;
+    CHECK(meta->findInt64("timeUs", &timeUs));
+    buffer->meta_data()->setInt64(kKeyTime, timeUs);
+    buffer->set_range(0, parseBuffer->size());
+    memcpy(buffer->data(), parseBuffer->data(), parseBuffer->size());
+    *out = buffer;
+    return OK;
+}
+
+
+static bool isCompatibleBrand(uint32_t fourcc) {
+    static const uint32_t kCompatibleBrands[] = {
+        FOURCC('i', 's', 'o', 'm'),
+        FOURCC('i', 's', 'o', '2'),
+        FOURCC('a', 'v', 'c', '1'),
+        FOURCC('3', 'g', 'p', '4'),
+        FOURCC('m', 'p', '4', '1'),
+        FOURCC('m', 'p', '4', '2'),
+
+        // Won't promise that the following file types can be played.
+        // Just give these file types a chance.
+        FOURCC('q', 't', ' ', ' '),  // Apple's QuickTime
+        FOURCC('M', 'S', 'N', 'V'),  // Sony's PSP
+
+        FOURCC('3', 'g', '2', 'a'),  // 3GPP2
+        FOURCC('3', 'g', '2', 'b'),
+    };
+
+    for (size_t i = 0;
+         i < sizeof(kCompatibleBrands) / sizeof(kCompatibleBrands[0]);
+         ++i) {
+        if (kCompatibleBrands[i] == fourcc) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+// Attempt to actually parse the 'ftyp' atom and determine if a suitable
+// compatible brand is present.
+// Also try to identify where this file's metadata ends
+// (end of the 'moov' atom) and report it to the caller as part of
+// the metadata.
+static bool Sniff(
+        const sp<DataSource> &source, String8 *mimeType, float *confidence,
+        sp<AMessage> *meta) {
+    // We scan up to 128k bytes to identify this file as an MP4.
+    static const off64_t kMaxScanOffset = 128ll * 1024ll;
+
+    off64_t offset = 0ll;
+    bool foundGoodFileType = false;
+    bool isFragmented = false;
+    off64_t moovAtomEndOffset = -1ll;
+    bool done = false;
+
+    while (!done && offset < kMaxScanOffset) {
+        uint32_t hdr[2];
+        if (source->readAt(offset, hdr, 8) < 8) {
+            return false;
+        }
+
+        uint64_t chunkSize = ntohl(hdr[0]);
+        uint32_t chunkType = ntohl(hdr[1]);
+        off64_t chunkDataOffset = offset + 8;
+
+        if (chunkSize == 1) {
+            if (source->readAt(offset + 8, &chunkSize, 8) < 8) {
+                return false;
+            }
+
+            chunkSize = ntoh64(chunkSize);
+            chunkDataOffset += 8;
+
+            if (chunkSize < 16) {
+                // The smallest valid chunk is 16 bytes long in this case.
+                return false;
+            }
+        } else if (chunkSize < 8) {
+            // The smallest valid chunk is 8 bytes long.
+            return false;
+        }
+
+        off64_t chunkDataSize = offset + chunkSize - chunkDataOffset;
+
+        char chunkstring[5];
+        MakeFourCCString(chunkType, chunkstring);
+        ALOGV("saw chunk type %s, size %lld @ %lld", chunkstring, chunkSize, offset);
+        switch (chunkType) {
+            case FOURCC('f', 't', 'y', 'p'):
+            {
+                if (chunkDataSize < 8) {
+                    return false;
+                }
+
+                uint32_t numCompatibleBrands = (chunkDataSize - 8) / 4;
+                for (size_t i = 0; i < numCompatibleBrands + 2; ++i) {
+                    if (i == 1) {
+                        // Skip this index, it refers to the minorVersion,
+                        // not a brand.
+                        continue;
+                    }
+
+                    uint32_t brand;
+                    if (source->readAt(
+                                chunkDataOffset + 4 * i, &brand, 4) < 4) {
+                        return false;
+                    }
+
+                    brand = ntohl(brand);
+                    char brandstring[5];
+                    MakeFourCCString(brand, brandstring);
+                    ALOGV("Brand: %s", brandstring);
+
+                    if (isCompatibleBrand(brand)) {
+                        foundGoodFileType = true;
+                        break;
+                    }
+                }
+
+                if (!foundGoodFileType) {
+                    return false;
+                }
+
+                break;
+            }
+
+            case FOURCC('m', 'o', 'o', 'v'):
+            {
+                moovAtomEndOffset = offset + chunkSize;
+                break;
+            }
+
+            case FOURCC('m', 'o', 'o', 'f'):
+            {
+                // this is kind of broken, since we might not actually find a
+                // moof box in the first 128k.
+                isFragmented = true;
+                done = true;
+                break;
+            }
+
+            default:
+                break;
+        }
+
+        offset += chunkSize;
+    }
+
+    if (!foundGoodFileType || !isFragmented) {
+        return false;
+    }
+
+    *mimeType = MEDIA_MIMETYPE_CONTAINER_MPEG4;
+    *confidence = 0.5f; // slightly more than MPEG4Extractor
+
+    if (moovAtomEndOffset >= 0) {
+        *meta = new AMessage;
+        (*meta)->setInt64("meta-data-size", moovAtomEndOffset);
+        (*meta)->setInt32("fragmented", 1); // tell MediaExtractor what to instantiate
+
+        ALOGV("found metadata size: %lld", moovAtomEndOffset);
+    }
+
+    return true;
+}
+
+// used by DataSource::RegisterDefaultSniffers
+bool SniffFragmentedMP4(
+        const sp<DataSource> &source, String8 *mimeType, float *confidence,
+        sp<AMessage> *meta) {
+    ALOGV("SniffFragmentedMP4");
+    char prop[PROPERTY_VALUE_MAX];
+    if (property_get("media.stagefright.use-fragmp4", prop, NULL)
+            && (!strcmp(prop, "1") || !strcasecmp(prop, "true"))) {
+        return Sniff(source, mimeType, confidence, meta);
+    }
+
+    return false;
+}
+
+}  // namespace android
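
Because the sniffer is gated on a system property, the new extractor stays
dormant by default. On a development device it can be enabled with, e.g.
(illustrative):

    adb shell setprop media.stagefright.use-fragmp4 true
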
diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp
index 6abaf23..d94054b 100644
--- a/media/libstagefright/MP3Extractor.cpp
+++ b/media/libstagefright/MP3Extractor.cpp
@@ -228,6 +228,7 @@
     virtual ~MP3Source();
 
 private:
+    static const size_t kMaxFrameSize;
     sp<MetaData> mMeta;
     sp<DataSource> mDataSource;
     off64_t mFirstFramePos;
@@ -405,6 +406,13 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
+// The theoretical maximum frame size for an MPEG audio stream should occur
+// while playing a Layer 2, MPEGv2.5 audio stream at 160kbps (with padding).
+// The size of this frame should be...
+// ((1152 samples/frame * 160000 bits/sec) /
+//  (8000 samples/sec * 8 bits/byte)) + 1 padding byte/frame = 2881 bytes/frame.
+// Set our max frame size to the nearest power of 2 above this size (aka, 4kB)
+const size_t MP3Source::kMaxFrameSize = (1 << 12); /* 4096 bytes */
+
 MP3Source::MP3Source(
         const sp<MetaData> &meta, const sp<DataSource> &source,
         off64_t first_frame_pos, uint32_t fixed_header,
@@ -433,7 +441,6 @@
 
     mGroup = new MediaBufferGroup;
 
-    const size_t kMaxFrameSize = 32768;
     mGroup->add_buffer(new MediaBuffer(kMaxFrameSize));
 
     mCurrentPos = mFirstFramePos;
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index a572541..dc8e4a3 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
 #define LOG_TAG "MPEG4Extractor"
 #include <utils/Log.h>
 
@@ -408,7 +409,7 @@
 }
 
 // Reads an encoded integer 7 bits at a time until it encounters the high bit clear.
-int32_t readSize(off64_t offset,
+static int32_t readSize(off64_t offset,
         const sp<DataSource> DataSource, uint8_t *numOfBytes) {
     uint32_t size = 0;
     uint8_t data;
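
A worked example of the encoding readSize() consumes (the standard MP4
"expandable" size: 7 payload bits per byte, a set high bit meaning another
byte follows): the byte sequence 0x81 0x23 decodes as (0x01 << 7) | 0x23 =
163, consuming two bytes.
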
@@ -1664,15 +1665,26 @@
                     mLastCommentData.setTo((const char *)buffer + 8);
                     break;
             }
-            if (mLastCommentMean == "com.apple.iTunes"
-                    && mLastCommentName == "iTunSMPB"
-                    && mLastCommentData.length() != 0) {
-                int32_t delay, padding;
-                if (sscanf(mLastCommentData,
-                           " %*x %x %x %*x", &delay, &padding) == 2) {
-                    mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
-                    mLastTrack->meta->setInt32(kKeyEncoderPadding, padding);
+
+            // Once we have a complete mean/name/data tuple, process it to see
+            // if it's something we are interested in. Whether or not we are
+            // interested in the specific tag, clear out the set afterwards so
+            // we are ready to process another tuple should one show up later
+            // in the file.
+            if ((mLastCommentMean.length() != 0) &&
+                (mLastCommentName.length() != 0) &&
+                (mLastCommentData.length() != 0)) {
+
+                if (mLastCommentMean == "com.apple.iTunes"
+                        && mLastCommentName == "iTunSMPB") {
+                    int32_t delay, padding;
+                    if (sscanf(mLastCommentData,
+                               " %*x %x %x %*x", &delay, &padding) == 2) {
+                        mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
+                        mLastTrack->meta->setInt32(kKeyEncoderPadding, padding);
+                    }
                 }
+
                 mLastCommentMean.clear();
                 mLastCommentName.clear();
                 mLastCommentData.clear();
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index 2740d6b..e7b5903 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -42,7 +42,7 @@
 const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS = "audio/aac-adts";
 
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
-const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/wav";
+const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
 const char *MEDIA_MIMETYPE_CONTAINER_OGG = "application/ogg";
 const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA = "video/x-matroska";
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS = "video/mp2ts";
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 9ab6611..b18c916 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -21,6 +21,7 @@
 #include "include/AMRExtractor.h"
 #include "include/MP3Extractor.h"
 #include "include/MPEG4Extractor.h"
+#include "include/FragmentedMP4Extractor.h"
 #include "include/WAVExtractor.h"
 #include "include/OggExtractor.h"
 #include "include/MPEG2PSExtractor.h"
@@ -93,7 +94,12 @@
     MediaExtractor *ret = NULL;
     if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
             || !strcasecmp(mime, "audio/mp4")) {
-        ret = new MPEG4Extractor(source);
+        int fragmented = 0;
+        if (meta != NULL && meta->findInt32("fragmented", &fragmented) && fragmented) {
+            ret = new FragmentedMP4Extractor(source);
+        } else {
+            ret = new MPEG4Extractor(source);
+        }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
         ret = new MP3Extractor(source, meta);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)
diff --git a/media/libstagefright/MetaData.cpp b/media/libstagefright/MetaData.cpp
index 755594a..a01ec97 100644
--- a/media/libstagefright/MetaData.cpp
+++ b/media/libstagefright/MetaData.cpp
@@ -22,6 +22,8 @@
 #include <string.h>
 
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MetaData.h>
 
 namespace android {
@@ -318,6 +320,12 @@
 
         default:
             out = String8::format("(unknown type %d, size %d)", mType, mSize);
+            if (mSize <= 48) { // if it's less than three lines of hex data, dump it
+                AString foo;
+                hexdump(data, mSize, 0, &foo);
+                out.append("\n");
+                out.append(foo.c_str());
+            }
             break;
     }
     return out;
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 2a16f66..74e9222 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -241,5 +241,196 @@
     return OK;
 }
 
+static size_t reassembleAVCC(const sp<ABuffer> &csd0, const sp<ABuffer> &csd1, char *avcc) {
+
+    avcc[0] = 1;        // version
+    avcc[1] = 0x64;     // profile
+    avcc[2] = 0;        // profile compatibility
+    avcc[3] = 0xd;      // level
+    avcc[4] = 0xff;     // reserved+size
+
+    size_t i = 0;
+    int numparams = 0;
+    int lastparamoffset = 0;
+    int avccidx = 6;
+    do {
+        if (i >= csd0->size() - 4 ||
+                memcmp(csd0->data() + i, "\x00\x00\x00\x01", 4) == 0) {
+            if (i >= csd0->size() - 4) {
+                // there can't be another param here, so use all the rest
+                i = csd0->size();
+            }
+            ALOGV("block at %d, last was %d", i, lastparamoffset);
+            if (lastparamoffset > 0) {
+                int size = i - lastparamoffset;
+                avcc[avccidx++] = size >> 8;
+                avcc[avccidx++] = size & 0xff;
+                memcpy(avcc+avccidx, csd0->data() + lastparamoffset, size);
+                avccidx += size;
+                numparams++;
+            }
+            i += 4;
+            lastparamoffset = i;
+        } else {
+            i++;
+        }
+    } while(i < csd0->size());
+    ALOGV("csd0 contains %d params", numparams);
+
+    avcc[5] = 0xe0 | numparams;
+    // and now csd-1
+    i = 0;
+    numparams = 0;
+    lastparamoffset = 0;
+    int numpicparamsoffset = avccidx;
+    avccidx++;
+    do {
+        if (i >= csd1->size() - 4 ||
+                memcmp(csd1->data() + i, "\x00\x00\x00\x01", 4) == 0) {
+            if (i >= csd1->size() - 4) {
+                // there can't be another param here, so use all the rest
+                i = csd1->size();
+            }
+            ALOGV("block at %d, last was %d", i, lastparamoffset);
+            if (lastparamoffset > 0) {
+                int size = i - lastparamoffset;
+                avcc[avccidx++] = size >> 8;
+                avcc[avccidx++] = size & 0xff;
+                memcpy(avcc+avccidx, csd1->data() + lastparamoffset, size);
+                avccidx += size;
+                numparams++;
+            }
+            i += 4;
+            lastparamoffset = i;
+        } else {
+            i++;
+        }
+    } while(i < csd1->size());
+    avcc[numpicparamsoffset] = numparams;
+    return avccidx;
+}
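+
+// For reference: the AVCDecoderConfigurationRecord rebuilt above is laid out
+// as (a summary of the container format, not code from this patch):
+//   [0] configurationVersion = 1, [1] profile, [2] profile_compatibility,
+//   [3] level, [4] 0xfc | (lengthSizeMinusOne = 3) = 0xff,
+//   [5] 0xe0 | numSPS, then each SPS as a 2-byte size plus payload,
+//   then one byte numPPS, then each PPS as a 2-byte size plus payload.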
+
+static void reassembleESDS(const sp<ABuffer> &csd0, char *esds) {
+    int csd0size = csd0->size();
+    esds[0] = 3; // kTag_ESDescriptor;
+    int esdescriptorsize = 26 + csd0size;
+    CHECK(esdescriptorsize < 268435456); // 7 bits per byte, so max is 2^28-1
+    esds[1] = 0x80 | (esdescriptorsize >> 21);
+    esds[2] = 0x80 | ((esdescriptorsize >> 14) & 0x7f);
+    esds[3] = 0x80 | ((esdescriptorsize >> 7) & 0x7f);
+    esds[4] = (esdescriptorsize & 0x7f);
+    esds[5] = esds[6] = 0; // es id
+    esds[7] = 0; // flags
+    esds[8] = 4; // kTag_DecoderConfigDescriptor
+    int configdescriptorsize = 18 + csd0size;
+    esds[9] = 0x80 | (configdescriptorsize >> 21);
+    esds[10] = 0x80 | ((configdescriptorsize >> 14) & 0x7f);
+    esds[11] = 0x80 | ((configdescriptorsize >> 7) & 0x7f);
+    esds[12] = (configdescriptorsize & 0x7f);
+    esds[13] = 0x40; // objectTypeIndication
+    esds[14] = 0x15; // streamType = 5 (audio); bytes 15-25 carry bufferSizeDB,
+    esds[15] = 0x00; // maxBitrate and avgBitrate, which ESDS.cpp ignores; the
+    esds[16] = 0x18; // actual values here were taken from a real file.
+    esds[17] = 0x00;
+    esds[18] = 0x00;
+    esds[19] = 0x00;
+    esds[20] = 0xfa;
+    esds[21] = 0x00;
+    esds[22] = 0x00;
+    esds[23] = 0x00;
+    esds[24] = 0xfa;
+    esds[25] = 0x00;
+    esds[26] = 5; // kTag_DecoderSpecificInfo;
+    esds[27] = 0x80 | (csd0size >> 21);
+    esds[28] = 0x80 | ((csd0size >> 14) & 0x7f);
+    esds[29] = 0x80 | ((csd0size >> 7) & 0x7f);
+    esds[30] = (csd0size & 0x7f);
+    memcpy((void*)&esds[31], csd0->data(), csd0size);
+    // data following this is ignored, so don't bother appending it
+}
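+
+// The 0x80-style bytes above use the MP4 "expandable" length encoding: 7 bits
+// per byte, a set high bit meaning another byte follows. Worked example
+// (illustrative): a payload size of 300 written in the fixed four-byte form
+// used here is 0x80 0x80 0x82 0x2c, since (2 << 7) | 0x2c = 300.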
+
+void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
+    AString mime;
+    if (msg->findString("mime", &mime)) {
+        meta->setCString(kKeyMIMEType, mime.c_str());
+    } else {
+        ALOGW("did not find mime type");
+    }
+
+    int64_t durationUs;
+    if (msg->findInt64("durationUs", &durationUs)) {
+        meta->setInt64(kKeyDuration, durationUs);
+    }
+
+    if (mime.startsWith("video/")) {
+        int32_t width;
+        int32_t height;
+        if (msg->findInt32("width", &width) && msg->findInt32("height", &height)) {
+            meta->setInt32(kKeyWidth, width);
+            meta->setInt32(kKeyHeight, height);
+        } else {
+            ALOGW("did not find width and/or height");
+        }
+    } else if (mime.startsWith("audio/")) {
+        int32_t numChannels;
+        if (msg->findInt32("channel-count", &numChannels)) {
+            meta->setInt32(kKeyChannelCount, numChannels);
+        }
+        int32_t sampleRate;
+        if (msg->findInt32("sample-rate", &sampleRate)) {
+            meta->setInt32(kKeySampleRate, sampleRate);
+        }
+        int32_t channelMask;
+        if (msg->findInt32("channel-mask", &channelMask)) {
+            meta->setInt32(kKeyChannelMask, channelMask);
+        }
+        int32_t delay = 0;
+        if (msg->findInt32("encoder-delay", &delay)) {
+            meta->setInt32(kKeyEncoderDelay, delay);
+        }
+        int32_t padding = 0;
+        if (msg->findInt32("encoder-padding", &padding)) {
+            meta->setInt32(kKeyEncoderPadding, padding);
+        }
+
+        int32_t isADTS;
+        if (msg->findInt32("is-adts", &isADTS)) {
+            meta->setInt32(kKeyIsADTS, isADTS);
+        }
+    }
+
+    int32_t maxInputSize;
+    if (msg->findInt32("max-input-size", &maxInputSize)) {
+        meta->setInt32(kKeyMaxInputSize, maxInputSize);
+    }
+
+    // reassemble the csd data into its original form
+    sp<ABuffer> csd0;
+    if (msg->findBuffer("csd-0", &csd0)) {
+        if (mime.startsWith("video/")) { // do we need to be stricter than this?
+            sp<ABuffer> csd1;
+            if (msg->findBuffer("csd-1", &csd1)) {
+                char avcc[1024]; // that oughta be enough, right?
+                size_t outsize = reassembleAVCC(csd0, csd1, avcc);
+                meta->setData(kKeyAVCC, kKeyAVCC, avcc, outsize);
+            }
+        } else if (mime.startsWith("audio/")) {
+            int csd0size = csd0->size();
+            char esds[csd0size + 31];
+            reassembleESDS(csd0, esds);
+            meta->setData(kKeyESDS, kKeyESDS, esds, sizeof(esds));
+        }
+    }
+
+    // XXX TODO add whatever other keys there are
+
+#if 0
+    ALOGI("converted %s to:", msg->debugString(0).c_str());
+    meta->dumpToLog();
+#endif
+}
+
+
 }  // namespace android
 
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index 851321d..a38400b 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -106,7 +106,7 @@
         return meta;
     }
 
-    meta->setCString(kKeyMIMEType, "audio/x-wav");
+    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_WAV);
 
     return meta;
 }
@@ -509,4 +509,3 @@
 }
 
 }  // namespace android
-
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index 65c1848..a141752 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -600,7 +600,7 @@
 
             bitrate = kBitrateV2[bitrate_index - 1];
             if (out_num_samples) {
-                *out_num_samples = 576;
+                *out_num_samples = (layer == 1 /* L3 */) ? 576 : 1152;
             }
         }
 
@@ -612,7 +612,8 @@
             *frame_size = 144000 * bitrate / sampling_rate + padding;
         } else {
             // V2 or V2.5
-            *frame_size = 72000 * bitrate / sampling_rate + padding;
+            size_t tmp = (layer == 1 /* L3 */) ? 72000 : 144000;
+            *frame_size = tmp * bitrate / sampling_rate + padding;
         }
     }
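
Worked check (illustrative): for an MPEG-2.5 Layer 2 stream at 160 kbps and
8000 Hz, the corrected formula gives 144000 * 160 / 8000 = 2880 bytes plus one
padding byte, i.e. 2881, exactly the worst case the new kMaxFrameSize comment
in MP3Extractor.cpp accounts for.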
 
diff --git a/media/libstagefright/include/FragmentedMP4Extractor.h b/media/libstagefright/include/FragmentedMP4Extractor.h
new file mode 100644
index 0000000..763cd3a
--- /dev/null
+++ b/media/libstagefright/include/FragmentedMP4Extractor.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAGMENTED_MP4_EXTRACTOR_H_
+
+#define FRAGMENTED_MP4_EXTRACTOR_H_
+
+#include "include/FragmentedMP4Parser.h"
+
+#include <media/stagefright/MediaExtractor.h>
+#include <utils/Vector.h>
+#include <utils/String8.h>
+
+namespace android {
+
+struct AMessage;
+class DataSource;
+class SampleTable;
+class String8;
+
+class FragmentedMP4Extractor : public MediaExtractor {
+public:
+    // Extractor assumes ownership of "source".
+    FragmentedMP4Extractor(const sp<DataSource> &source);
+
+    virtual size_t countTracks();
+    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
+    virtual sp<MetaData> getMetaData();
+    virtual uint32_t flags() const;
+
+protected:
+    virtual ~FragmentedMP4Extractor();
+
+private:
+    sp<ALooper> mLooper;
+    sp<FragmentedMP4Parser> mParser;
+    sp<DataSource> mDataSource;
+    status_t mInitCheck;
+    size_t mAudioTrackIndex;
+    size_t mTrackCount;
+
+    sp<MetaData> mFileMetaData;
+
+    Vector<uint32_t> mPath;
+
+    FragmentedMP4Extractor(const FragmentedMP4Extractor &);
+    FragmentedMP4Extractor &operator=(const FragmentedMP4Extractor &);
+};
+
+bool SniffFragmentedMP4(
+        const sp<DataSource> &source, String8 *mimeType, float *confidence,
+        sp<AMessage> *);
+
+}  // namespace android
+
+#endif  // FRAGMENTED_MP4_EXTRACTOR_H_
diff --git a/media/libstagefright/include/FragmentedMP4Parser.h b/media/libstagefright/include/FragmentedMP4Parser.h
index bd8fe32..0edafb9 100644
--- a/media/libstagefright/include/FragmentedMP4Parser.h
+++ b/media/libstagefright/include/FragmentedMP4Parser.h
@@ -19,6 +19,7 @@
 #define PARSER_H_
 
 #include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/DataSource.h>
 #include <utils/Vector.h>
 
 namespace android {
@@ -30,6 +31,7 @@
         Source() {}
 
         virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
+        virtual bool isSeekable() = 0;
 
         protected:
         virtual ~Source() {}
@@ -42,9 +44,12 @@
 
     void start(const char *filename);
     void start(const sp<Source> &source);
+    void start(sp<DataSource> &source);
 
-    sp<AMessage> getFormat(bool audio);
-    status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
+    sp<AMessage> getFormat(bool audio, bool synchronous = false);
+    status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit, bool synchronous = false);
+    status_t seekTo(bool audio, int64_t timeUs);
+    bool isSeekable() const;
 
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
@@ -58,6 +63,7 @@
         kWhatReadMore,
         kWhatGetFormat,
         kWhatDequeueAccessUnit,
+        kWhatSeekTo,
     };
 
     struct TrackFragment;
@@ -97,6 +103,11 @@
         off64_t mOffset;
     };
 
+    struct SidxEntry {
+        size_t mSize;
+        uint32_t mDurationUs;
+    };
+
     struct TrackInfo {
         enum Flags {
             kTrackEnabled     = 0x01,
@@ -107,6 +118,7 @@
         uint32_t mTrackID;
         uint32_t mFlags;
         uint32_t mDuration;  // This is the duration in terms of movie timescale!
+        uint64_t mSidxDuration; // usec, from sidx box, which can use a different timescale
 
         uint32_t mMediaTimeScale;
 
@@ -121,6 +133,7 @@
 
         uint32_t mDecodingTime;
 
+        Vector<SidxEntry> mSidx;
         sp<StaticTrackFragment> mStaticFragment;
         List<sp<TrackFragment> > mFragments;
     };
@@ -151,6 +164,8 @@
     sp<Source> mSource;
     off_t mBufferPos;
     bool mSuspended;
+    bool mDoneWithMoov;
+    off_t mFirstMoofOffset; // used as the starting point for offsets calculated from the sidx box
     sp<ABuffer> mBuffer;
     Vector<Container> mStack;
     KeyedVector<uint32_t, TrackInfo> mTracks;  // TrackInfo by trackID
@@ -164,6 +179,7 @@
 
     status_t onProceed();
     status_t onDequeueAccessUnit(size_t trackIndex, sp<ABuffer> *accessUnit);
+    status_t onSeekTo(bool wantAudio, int64_t position);
 
     void enter(off64_t offset, uint32_t type, uint64_t size);
 
@@ -222,6 +238,9 @@
     status_t parseMediaData(
             uint32_t type, size_t offset, uint64_t size);
 
+    status_t parseSegmentIndex(
+            uint32_t type, size_t offset, uint64_t size);
+
     TrackInfo *editTrack(uint32_t trackID, bool createIfNecessary = false);
 
     ssize_t findTrack(bool wantAudio) const;
diff --git a/media/libstagefright/mp4/FragmentedMP4Parser.cpp b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
index e130a80..7fe4e63 100644
--- a/media/libstagefright/mp4/FragmentedMP4Parser.cpp
+++ b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
@@ -18,8 +18,8 @@
 #define LOG_TAG "FragmentedMP4Parser"
 #include <utils/Log.h>
 
-#include "include/FragmentedMP4Parser.h"
 #include "include/ESDS.h"
+#include "include/FragmentedMP4Parser.h"
 #include "TrackFragment.h"
 
 
@@ -31,6 +31,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
 
+
 namespace android {
 
 static const char *Fourcc2String(uint32_t fourcc) {
@@ -121,6 +122,8 @@
     },
 
     { FOURCC('m', 'f', 'r', 'a'), 0, NULL },
+
+    { FOURCC('s', 'i', 'd', 'x'), 0, &FragmentedMP4Parser::parseSegmentIndex },
 };
 
 struct FileSource : public FragmentedMP4Parser::Source {
@@ -134,15 +137,92 @@
         return fread(data, 1, size, mFile);
     }
 
+    virtual bool isSeekable() {
+        return true;
+    }
+
     private:
     FILE *mFile;
 
     DISALLOW_EVIL_CONSTRUCTORS(FileSource);
 };
 
+struct ReadTracker : public RefBase {
+    ReadTracker(off64_t size) {
+        allocSize = 1 + size / 8192; // 1 bit per kilobyte
+        bitmap = (char*) calloc(1, allocSize);
+    }
+    virtual ~ReadTracker() {
+        dumpToLog();
+        free(bitmap);
+    }
+    void mark(off64_t offset, size_t size) {
+        int firstbit = offset / 1024;
+        int lastbit = (offset + size - 1) / 1024;
+        for (int i = firstbit; i <= lastbit; i++) {
+            bitmap[i/8] |= (0x80 >> (i & 7));
+        }
+    }
+
+ private:
+    void dumpToLog() {
+        // 96 chars per line, each char represents one kilobyte, 1 kb per bit
+        int numlines = allocSize / 12;
+        char buf[97];
+        char *cur = bitmap;
+        for (int i = 0; i < numlines; i++) {
+            for (int j = 0; j < 12; j++) {
+                for (int k = 0; k < 8; k++) {
+                    buf[(j * 8) + k] = (*cur & (0x80 >> k)) ? 'X' : '.';
+                }
+                cur++;
+            }
+            buf[96] = '\0';
+            ALOGI("%5dk: %s", i * 96, buf);
+        }
+    }
+
+    size_t allocSize;
+    char *bitmap;
+};
+
+struct DataSourceSource : public FragmentedMP4Parser::Source {
+    DataSourceSource(sp<DataSource> &source)
+        : mDataSource(source) {
+            CHECK(mDataSource != NULL);
+#if 0
+            off64_t size;
+            if (source->getSize(&size) == OK) {
+                mReadTracker = new ReadTracker(size);
+            } else {
+                ALOGE("couldn't get data source size");
+            }
+#endif
+        }
+
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
+        if (mReadTracker != NULL) {
+            mReadTracker->mark(offset, size);
+        }
+        return mDataSource->readAt(offset, data, size);
+    }
+
+    virtual bool isSeekable() {
+        return true;
+    }
+
+    private:
+    sp<DataSource> mDataSource;
+    sp<ReadTracker> mReadTracker;
+
+    DISALLOW_EVIL_CONSTRUCTORS(DataSourceSource);
+};
+
 FragmentedMP4Parser::FragmentedMP4Parser()
     : mBufferPos(0),
       mSuspended(false),
+      mDoneWithMoov(false),
+      mFirstMoofOffset(0),
       mFinalResult(OK) {
 }
 
@@ -153,54 +233,142 @@
     sp<AMessage> msg = new AMessage(kWhatStart, id());
     msg->setObject("source", new FileSource(filename));
     msg->post();
+    ALOGV("Parser::start(%s)", filename);
 }
 
 void FragmentedMP4Parser::start(const sp<Source> &source) {
     sp<AMessage> msg = new AMessage(kWhatStart, id());
     msg->setObject("source", source);
     msg->post();
+    ALOGV("Parser::start(Source)");
 }
 
-sp<AMessage> FragmentedMP4Parser::getFormat(bool audio) {
-    sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
-    msg->setInt32("audio", audio);
+void FragmentedMP4Parser::start(sp<DataSource> &source) {
+    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    msg->setObject("source", new DataSourceSource(source));
+    msg->post();
+    ALOGV("Parser::start(DataSource)");
+}
+
+sp<AMessage> FragmentedMP4Parser::getFormat(bool audio, bool synchronous) {
+
+    while (true) {
+        bool moovDone = mDoneWithMoov;
+        sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
+        msg->setInt32("audio", audio);
+
+        sp<AMessage> response;
+        status_t err = msg->postAndAwaitResponse(&response);
+
+        if (err != OK) {
+            ALOGV("getFormat post failed: %d", err);
+            return NULL;
+        }
+
+        if (response->findInt32("err", &err) && err != OK) {
+            if (synchronous && err == -EWOULDBLOCK && !moovDone) {
+                resumeIfNecessary();
+                ALOGV("@getFormat parser not ready yet, retrying");
+                usleep(10000);
+                continue;
+            }
+            ALOGV("getFormat failed: %d", err);
+            return NULL;
+        }
+
+        sp<AMessage> format;
+        CHECK(response->findMessage("format", &format));
+
+        ALOGV("returning format %s", format->debugString().c_str());
+        return format;
+    }
+}
+
+status_t FragmentedMP4Parser::seekTo(bool wantAudio, int64_t timeUs) {
+    sp<AMessage> msg = new AMessage(kWhatSeekTo, id());
+    msg->setInt32("audio", wantAudio);
+    msg->setInt64("position", timeUs);
 
     sp<AMessage> response;
     status_t err = msg->postAndAwaitResponse(&response);
-
-    if (err != OK) {
-        return NULL;
-    }
-
-    if (response->findInt32("err", &err) && err != OK) {
-        return NULL;
-    }
-
-    sp<AMessage> format;
-    CHECK(response->findMessage("format", &format));
-
-    ALOGV("returning format %s", format->debugString().c_str());
-    return format;
+    return err;
 }
 
-status_t FragmentedMP4Parser::dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit) {
-    sp<AMessage> msg = new AMessage(kWhatDequeueAccessUnit, id());
-    msg->setInt32("audio", audio);
-
-    sp<AMessage> response;
-    status_t err = msg->postAndAwaitResponse(&response);
-
-    if (err != OK) {
-        return err;
+bool FragmentedMP4Parser::isSeekable() const {
+    while (mFirstMoofOffset == 0 && mFinalResult == OK) {
+        usleep(10000);
     }
-
-    if (response->findInt32("err", &err) && err != OK) {
-        return err;
+    bool seekable = mSource->isSeekable();
+    for (size_t i = 0; seekable && i < mTracks.size(); i++) {
+        const TrackInfo *info = &mTracks.valueAt(i);
+        seekable &= !info->mSidx.empty();
     }
+    return seekable;
+}
 
-    CHECK(response->findBuffer("accessUnit", accessUnit));
+status_t FragmentedMP4Parser::onSeekTo(bool wantAudio, int64_t position) {
+    status_t err = -EINVAL;
+    ssize_t trackIndex = findTrack(wantAudio);
+    if (trackIndex < 0) {
+        err = trackIndex;
+    } else {
+        TrackInfo *info = &mTracks.editValueAt(trackIndex);
 
-    return OK;
+        int numSidxEntries = info->mSidx.size();
+        int64_t totalTime = 0;
+        off_t totalOffset = mFirstMoofOffset;
+        for (int i = 0; i < numSidxEntries; i++) {
+            const SidxEntry *se = &info->mSidx[i];
+            totalTime += se->mDurationUs;
+            if (totalTime > position) {
+                mBuffer->setRange(0,0);
+                mBufferPos = totalOffset;
+                if (mFinalResult == ERROR_END_OF_STREAM) {
+                    mFinalResult = OK;
+                    mSuspended = true; // force resume
+                    resumeIfNecessary();
+                }
+                info->mFragments.clear();
+                info->mDecodingTime = position * info->mMediaTimeScale / 1000000ll;
+                return OK;
+            }
+            totalOffset += se->mSize;
+        }
+    }
+    ALOGV("seekTo out of range");
+    return err;
+}
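+
+// Worked example (illustrative): with sidx entries of 2 seconds / 1 MB each,
+// a seek to 5 s accumulates 2 s, then 4 s, then 6 s > 5 s at the third entry,
+// so parsing restarts at mFirstMoofOffset + 2 MB, the start of the fragment
+// spanning 4 s to 6 s.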
+
+status_t FragmentedMP4Parser::dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit,
+                                                bool synchronous) {
+
+    while (true) {
+        sp<AMessage> msg = new AMessage(kWhatDequeueAccessUnit, id());
+        msg->setInt32("audio", audio);
+
+        sp<AMessage> response;
+        status_t err = msg->postAndAwaitResponse(&response);
+
+        if (err != OK) {
+            ALOGV("dequeue fail 1: %d", err);
+            return err;
+        }
+
+        if (response->findInt32("err", &err) && err != OK) {
+            if (synchronous && err == -EWOULDBLOCK) {
+                resumeIfNecessary();
+                ALOGV("Parser not ready yet, retrying");
+                usleep(10000);
+                continue;
+            }
+            ALOGV("dequeue fail 2: %d, %d", err, synchronous);
+            return err;
+        }
+
+        CHECK(response->findBuffer("accessUnit", accessUnit));
+
+        return OK;
+    }
 }
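
Annotation: when called with synchronous set, dequeueAccessUnit() above absorbs -EWOULDBLOCK by resuming the parser and retrying, so a blocking caller only ever sees data or a terminal status. A hedged usage sketch; the parser handle and consuming loop are assumptions, not code from this patch:

    sp<ABuffer> accessUnit;
    status_t err;
    while ((err = parser->dequeueAccessUnit(
            true /* audio */, &accessUnit, true /* synchronous */)) == OK) {
        // ... consume one audio access unit ...
    }
    // err now carries the terminal status (e.g. end of stream);
    // -EWOULDBLOCK never escapes when synchronous is true.
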
 
 ssize_t FragmentedMP4Parser::findTrack(bool wantAudio) const {
@@ -272,7 +440,7 @@
             size_t maxBytesToRead = mBuffer->capacity() - mBuffer->size();
 
             if (maxBytesToRead < needed) {
-                ALOGI("resizing buffer.");
+                ALOGV("resizing buffer.");
 
                 sp<ABuffer> newBuffer =
                     new ABuffer((mBuffer->size() + needed + 1023) & ~1023);
@@ -290,7 +458,7 @@
                     mBuffer->data() + mBuffer->size(), needed);
 
             if (n < (ssize_t)needed) {
-                ALOGI("%s", "Reached EOF");
+                ALOGV("Reached EOF when reading %d @ %d + %d", needed, mBufferPos, mBuffer->size());
                 if (n < 0) {
                     mFinalResult = n;
                 } else if (n == 0) {
@@ -321,8 +489,16 @@
             } else {
                 TrackInfo *info = &mTracks.editValueAt(trackIndex);
 
+                sp<AMessage> format = info->mSampleDescs.itemAt(0).mFormat;
+                if (info->mSidxDuration) {
+                    format->setInt64("durationUs", info->mSidxDuration);
+                } else {
+                    // this is probably going to be zero. Oh well...
+                    format->setInt64("durationUs",
+                                     1000000ll * info->mDuration / info->mMediaTimeScale);
+                }
                 response->setMessage(
-                        "format", info->mSampleDescs.itemAt(0).mFormat);
+                        "format", format);
 
                 err = OK;
             }
@@ -366,6 +542,30 @@
             break;
         }
 
+        case kWhatSeekTo:
+        {
+            ALOGV("kWhatSeekTo");
+            int32_t wantAudio;
+            CHECK(msg->findInt32("audio", &wantAudio));
+            int64_t position;
+            CHECK(msg->findInt64("position", &position));
+
+            status_t err = -EWOULDBLOCK;
+            sp<AMessage> response = new AMessage;
+
+            ssize_t trackIndex = findTrack(wantAudio);
+
+            if (trackIndex < 0) {
+                err = trackIndex;
+            } else {
+                err = onSeekTo(wantAudio, position);
+            }
+            response->setInt32("err", err);
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            response->postReply(replyID);
+            break;
+        }
         default:
             TRESPASS();
     }
@@ -429,6 +629,12 @@
     if ((i < kNumDispatchers && kDispatchTable[i].mHandler == 0)
             || isSampleEntryBox || ptype == FOURCC('i', 'l', 's', 't')) {
         // This is a container box.
+        if (type == FOURCC('m', 'o', 'o', 'f')) {
+            if (mFirstMoofOffset == 0) {
+                ALOGV("first moof @ %08x", mBufferPos + offset);
+                mFirstMoofOffset = mBufferPos + offset - 8; // point at the size
+            }
+        }
         if (type == FOURCC('m', 'e', 't', 'a')) {
             if ((err = need(offset + 4)) < OK) {
                 return err;
@@ -589,7 +795,7 @@
         return;
     }
 
-    ALOGI("resuming.");
+    ALOGV("resuming.");
 
     mSuspended = false;
     (new AMessage(kWhatProceed, id()))->post();
@@ -647,7 +853,7 @@
 
         int cmp = CompareSampleLocation(sampleInfo, mdatInfo);
 
-        if (cmp < 0) {
+        if (cmp < 0 && !mSource->isSeekable()) {
             return -EPIPE;
         } else if (cmp == 0) {
             if (i > 0) {
@@ -669,6 +875,8 @@
         size_t numDroppable = 0;
         bool done = false;
 
+        // XXX FIXME: if one of the tracks is not advanced (e.g. if you play an audio+video
+        // file with sf2), then mMediaData will not be pruned and will keep growing
         for (size_t i = 0; !done && i < mMediaData.size(); ++i) {
             const MediaDataInfo &mdatInfo = mMediaData.itemAt(i);
 
@@ -896,6 +1104,8 @@
 
                     static_cast<DynamicTrackFragment *>(
                             fragment.get())->signalCompletion();
+                } else if (container->mType == FOURCC('m', 'o', 'o', 'v')) {
+                    mDoneWithMoov = true;
                 }
 
                 container = NULL;
@@ -953,6 +1163,10 @@
     TrackInfo *info = editTrack(trackID, true /* createIfNecessary */);
     info->mFlags = flags;
     info->mDuration = duration;
+    if (info->mDuration == 0xffffffff) {
+        // ffmpeg sets this to -1, which is incorrect.
+        info->mDuration = 0;
+    }
 
     info->mStaticFragment = new StaticTrackFragment;
 
@@ -1363,13 +1577,100 @@
     info->mOffset = mBufferPos + offset;
 
     if (mMediaData.size() > 10) {
-        ALOGI("suspending for now.");
+        ALOGV("suspending for now.");
         mSuspended = true;
     }
 
     return OK;
 }
 
+status_t FragmentedMP4Parser::parseSegmentIndex(
+        uint32_t type, size_t offset, uint64_t size) {
+    ALOGV("sidx box type %d, offset %d, size %d", type, int(offset), int(size));
+//    AString sidxstr;
+//    hexdump(mBuffer->data() + offset, size, 0 /* indent */, &sidxstr);
+//    ALOGV("raw sidx:");
+//    ALOGV("%s", sidxstr.c_str());
+    if (offset + 12 > size) {
+        return -EINVAL;
+    }
+
+    uint32_t flags = readU32(offset);
+
+    uint32_t version = flags >> 24;
+    flags &= 0xffffff;
+
+    ALOGV("sidx version %d", version);
+
+    uint32_t referenceId = readU32(offset + 4);
+    uint32_t timeScale = readU32(offset + 8);
+    ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale);
+
+    uint64_t earliestPresentationTime;
+    uint64_t firstOffset;
+
+    offset += 12;
+
+    if (version == 0) {
+        if (offset + 8 > size) {
+            return -EINVAL;
+        }
+        earliestPresentationTime = readU32(offset);
+        firstOffset = readU32(offset + 4);
+        offset += 8;
+    } else {
+        if (offset + 16 > size) {
+            return -EINVAL;
+        }
+        earliestPresentationTime = readU64(offset);
+        firstOffset = readU64(offset + 8);
+        offset += 16;
+    }
+    ALOGV("sidx pres/off: %Ld/%Ld", earliestPresentationTime, firstOffset);
+
+    if (offset + 4 > size) {
+        return -EINVAL;
+    }
+    if (readU16(offset) != 0) { // reserved
+        return -EINVAL;
+    }
+    int32_t referenceCount = readU16(offset + 2);
+    offset += 4;
+    ALOGV("refcount: %d", referenceCount);
+
+    if (offset + referenceCount * 12 > size) {
+        return -EINVAL;
+    }
+
+    TrackInfo *info = editTrack(mCurrentTrackID);
+    uint64_t total_duration = 0;
+    for (int i = 0; i < referenceCount; i++) {
+        uint32_t d1 = readU32(offset);
+        uint32_t d2 = readU32(offset + 4);
+        uint32_t d3 = readU32(offset + 8);
+
+        if (d1 & 0x80000000) {
+            ALOGW("sub-sidx boxes not supported yet");
+        }
+        bool sap = d3 & 0x80000000;
+        uint32_t saptype = (d3 >> 28) & 7; // SAP_type: bits 30..28 (bit 31 is starts_with_SAP)
+        if (!sap || saptype > 2) {
+            ALOGW("not a stream access point, or unsupported type");
+        }
+        total_duration += d2;
+        offset += 12;
+        ALOGV(" item %d, %08x %08x %08x", i, d1, d2, d3);
+        SidxEntry se;
+        se.mSize = d1 & 0x7fffffff;
+        se.mDurationUs = 1000000LL * d2 / timeScale;
+        info->mSidx.add(se);
+    }
+
+    info->mSidxDuration = total_duration * 1000000 / timeScale;
+    ALOGV("duration: %lld", info->mSidxDuration);
+    return OK;
+}
+
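Annotation: for reference, the 'sidx' payload layout that parseSegmentIndex() walks above, per ISO/IEC 14496-12 (SegmentIndexBox); field order only, as a sketch:

    // uint32  version(8) | flags(24)
    // uint32  reference_ID
    // uint32  timescale
    // uint32/uint64  earliest_presentation_time   (32-bit iff version == 0)
    // uint32/uint64  first_offset                 (32-bit iff version == 0)
    // uint16  reserved (must be 0)
    // uint16  reference_count
    // then reference_count 12-byte entries:
    //   uint32  reference_type(1) | referenced_size(31)
    //   uint32  subsegment_duration (in 'timescale' units)
    //   uint32  starts_with_SAP(1) | SAP_type(3) | SAP_delta_time(28)
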
 status_t FragmentedMP4Parser::parseTrackExtends(
         uint32_t type, size_t offset, uint64_t size) {
     if (offset + 24 > size) {
@@ -1407,6 +1708,7 @@
     info.mTrackID = trackID;
     info.mFlags = 0;
     info.mDuration = 0xffffffff;
+    info.mSidxDuration = 0;
     info.mMediaTimeScale = 0;
     info.mMediaHandlerType = 0;
     info.mDefaultSampleDescriptionIndex = 0;
diff --git a/media/libstagefright/wifi-display/ANetworkSession.cpp b/media/libstagefright/wifi-display/ANetworkSession.cpp
index 4ddd778..435e72f 100644
--- a/media/libstagefright/wifi-display/ANetworkSession.cpp
+++ b/media/libstagefright/wifi-display/ANetworkSession.cpp
@@ -331,11 +331,15 @@
         notify->post();
 
 #if 1
-        // XXX The dongle sends the wrong content length header on a
+        // XXX The (old) dongle sends the wrong content length header on a
         // SET_PARAMETER request that signals a "wfd_idr_request".
         // (17 instead of 19).
         const char *content = msg->getContent();
-        if (content && !memcmp(content, "wfd_idr_request\r\n", 17)) {
+        if (content
+                && !memcmp(content, "wfd_idr_request\r\n", 17)
+                && length >= 19
+                && mInBuffer.c_str()[length] == '\r'
+                && mInBuffer.c_str()[length + 1] == '\n') {
             length += 2;
         }
 #endif
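
Annotation: the tightened check above only pads the dongle's short content length when the two bytes just past the declared payload really are the CRLF it failed to count. Isolated as a sketch (the extra bufLen guard is an assumption added for safety; the patch indexes the session's input buffer directly):

    #include <cstddef>
    #include <cstring>

    static bool needsCrlfFixup(const char *content, const char *buf,
                               size_t bufLen, size_t length) {
        return content != NULL
            && !memcmp(content, "wfd_idr_request\r\n", 17)
            && length >= 19
            && bufLen >= length + 2          // don't read past the buffer
            && buf[length] == '\r'
            && buf[length + 1] == '\n';
    }
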
@@ -424,8 +428,10 @@
     status_t err = OK;
 
     if (n > 0) {
+#if 0
         ALOGI("out:");
         hexdump(mOutBuffer.c_str(), n);
+#endif
 
         mOutBuffer.erase(0, n);
     } else if (n < 0) {
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 6c01c7b..abd7ec3 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -42,7 +42,6 @@
 #include <media/stagefright/MPEG2TSWriter.h>
 #include <media/stagefright/SurfaceMediaSource.h>
 #include <media/stagefright/Utils.h>
-#include <ui/DisplayInfo.h>
 
 #include <OMX_IVCommon.h>
 
@@ -598,10 +597,6 @@
     mCodecLooper = new ALooper;
     mCodecLooper->start();
 
-    DisplayInfo info;
-    SurfaceComposerClient::getDisplayInfo(0, &info);
-
-    // sp<SurfaceMediaSource> source = new SurfaceMediaSource(info.w, info.h);
     sp<SurfaceMediaSource> source = new SurfaceMediaSource(width(), height());
 
 #if 0
@@ -642,7 +637,8 @@
     CHECK(service != NULL);
 
     // Add one reference to account for the serializer.
-    err = source->setMaxAcquiredBufferCount(numInputBuffers + 1);
+    // Add another two for unknown reasons.
+    err = source->setMaxAcquiredBufferCount(31 /* numInputBuffers + 1 */);
     CHECK_EQ(err, (status_t)OK);
 
     mBufferQueue = source->getBufferQueue();
@@ -650,7 +646,6 @@
     if (mLegacyMode) {
         service->connectDisplay(mBufferQueue);
     }
-#endif
 
 #if 0
     sp<AudioSource> audioSource = new AudioSource(
@@ -658,24 +653,27 @@
             48000 /* sampleRate */,
             2 /* channelCount */);  // XXX AUDIO_CHANNEL_IN_STEREO?
 
-    CHECK_EQ((status_t)OK, audioSource->initCheck());
+    if (audioSource->initCheck() == OK) {
+        audioSource->setUseLooperTime(true);
 
-    audioSource->setUseLooperTime(true);
+        index = mSerializer->addSource(audioSource);
+        CHECK_GE(index, 0);
 
-    index = mSerializer->addSource(audioSource);
-    CHECK_GE(index, 0);
+        sp<AMessage> audioFormat;
+        err = convertMetaDataToMessage(audioSource->getFormat(), &audioFormat);
+        CHECK_EQ(err, (status_t)OK);
 
-    sp<AMessage> audioFormat;
-    err = convertMetaDataToMessage(audioSource->getFormat(), &audioFormat);
-    CHECK_EQ(err, (status_t)OK);
+        sp<AMessage> audioNotify = new AMessage(kWhatConverterNotify, id());
+        audioNotify->setSize("trackIndex", index);
 
-    sp<AMessage> audioNotify = new AMessage(kWhatConverterNotify, id());
-    audioNotify->setSize("trackIndex", index);
+        converter = new Converter(audioNotify, mCodecLooper, audioFormat);
+        looper()->registerHandler(converter);
 
-    converter = new Converter(audioNotify, mCodecLooper, audioFormat);
-    looper()->registerHandler(converter);
-
-    mTracks.add(index, new Track(converter));
+        mTracks.add(index, new Track(converter));
+    } else {
+        ALOGW("Unable to instantiate audio source");
+    }
+#endif
 #endif
 
     return OK;
@@ -686,11 +684,11 @@
 }
 
 int32_t WifiDisplaySource::PlaybackSession::width() const {
-    return 720;
+    return mLegacyMode ? 720 : 1280;
 }
 
 int32_t WifiDisplaySource::PlaybackSession::height() const {
-    return 1280;
+    return mLegacyMode ? 1280 : 720;
 }
 
 void WifiDisplaySource::PlaybackSession::scheduleSendSR() {
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index 0786f2b..8e8f04a 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
 #define LOG_TAG "WifiDisplaySource"
 #include <utils/Log.h>
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 14f74b5..a5d4c6c 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -291,6 +291,14 @@
         for (size_t i = 0; i < ARRAY_SIZE(audio_interfaces); i++) {
             loadHwModule_l(audio_interfaces[i]);
         }
+        // then try to find a module supporting the requested device.
+        for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+            AudioHwDevice *audioHwDevice = mAudioHwDevs.valueAt(i);
+            audio_hw_device_t *dev = audioHwDevice->hwDevice();
+            if ((dev->get_supported_devices != NULL) &&
+                    (dev->get_supported_devices(dev) & devices) == devices)
+                return audioHwDevice;
+        }
     } else {
         // check a match for the requested module handle
         AudioHwDevice *audioHwDevice = mAudioHwDevs.valueFor(module);
@@ -298,13 +306,6 @@
             return audioHwDevice;
         }
     }
-    // then try to find a module supporting the requested device.
-    for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
-        AudioHwDevice *audioHwDevice = mAudioHwDevs.valueAt(i);
-        audio_hw_device_t *dev = audioHwDevice->hwDevice();
-        if ((dev->get_supported_devices(dev) & devices) == devices)
-            return audioHwDevice;
-    }
 
     return NULL;
 }
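
Annotation: with the move above, the "first module that supports all requested devices" fallback now applies only when no module handle was given, instead of second-guessing an explicit handle, and it tolerates HALs that leave get_supported_devices unimplemented. The predicate, isolated as a sketch:

    #include <hardware/audio.h>
    #include <system/audio.h>

    // A HAL module qualifies only if it implements get_supported_devices
    // and reports support for every requested device bit.
    static bool supportsAllDevices(audio_hw_device_t *dev,
                                   audio_devices_t devices) {
        return dev->get_supported_devices != NULL
            && (dev->get_supported_devices(dev) & devices) == devices;
    }
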
@@ -884,7 +885,7 @@
             if (mBtNrecIsOff != btNrecIsOff) {
                 for (size_t i = 0; i < mRecordThreads.size(); i++) {
                     sp<RecordThread> thread = mRecordThreads.valueAt(i);
-                    audio_devices_t device = thread->device() & AUDIO_DEVICE_IN_ALL;
+                    audio_devices_t device = thread->inDevice();
                     bool suspend = audio_is_bluetooth_sco_device(device) && btNrecIsOff;
                     // collect all of the thread's session IDs
                     KeyedVector<int, bool> ids = thread->sessionIds();
@@ -1133,7 +1134,7 @@
 // ----------------------------------------------------------------------------
 
 AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
-        audio_devices_t device, type_t type)
+        audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
     :   Thread(false /*canCallJava*/),
         mType(type),
         mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0), mNormalFrameCount(0),
@@ -1141,7 +1142,8 @@
         mChannelCount(0),
         mFrameSize(1), mFormat(AUDIO_FORMAT_INVALID),
         mParamStatus(NO_ERROR),
-        mStandby(false), mDevice(device), mId(id),
+        mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
+        mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
         // mName will be set by concrete (non-virtual) subclass
         mDeathRecipient(new PMDeathRecipient(this))
 {
@@ -1518,7 +1520,7 @@
                                              audio_io_handle_t id,
                                              audio_devices_t device,
                                              type_t type)
-    :   ThreadBase(audioFlinger, id, device, type),
+    :   ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
         mMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
         // mStreamTypes[] initialized in constructor body
         mOutput(output),
@@ -3451,7 +3453,7 @@
 #ifdef ADD_BATTERY_DATA
             // when changing the audio output device, call addBatteryData to notify
             // the change
-            if (mDevice != value) {
+            if (mOutDevice != value) {
                 uint32_t params = 0;
                 // check whether speaker is on
                 if (value & AUDIO_DEVICE_OUT_SPEAKER) {
@@ -3473,9 +3475,9 @@
 
             // forward device change to effects that have requested to be
             // aware of attached audio device.
-            mDevice = value;
+            mOutDevice = value;
             for (size_t i = 0; i < mEffectChains.size(); i++) {
-                mEffectChains[i]->setDevice_l(mDevice);
+                mEffectChains[i]->setDevice_l(mOutDevice);
             }
         }
 
@@ -3930,7 +3932,7 @@
 
 AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
         AudioFlinger::MixerThread* mainThread, audio_io_handle_t id)
-    :   MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->device(), DUPLICATING),
+    :   MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(), DUPLICATING),
         mWaitTimeMs(UINT_MAX)
 {
     addOutputTrack(mainThread);
@@ -5936,7 +5938,7 @@
                                          audio_channel_mask_t channelMask,
                                          audio_io_handle_t id,
                                          audio_devices_t device) :
-    ThreadBase(audioFlinger, id, device, RECORD),
+    ThreadBase(audioFlinger, id, AUDIO_DEVICE_NONE, device, RECORD),
     mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
     // mRsmpInIndex and mInputBytes set by readInputParameters()
     mReqChannelCount(popcount(channelMask)),
@@ -6215,7 +6217,7 @@
         mTracks.add(track);
 
         // disable AEC and NS if the device is a BT SCO headset supporting those pre processings
-        bool suspend = audio_is_bluetooth_sco_device(mDevice & AUDIO_DEVICE_IN_ALL) &&
+        bool suspend = audio_is_bluetooth_sco_device(mInDevice) &&
                         mAudioFlinger->btNrecIsOff();
         setEffectSuspended_l(FX_IID_AEC, suspend, sessionId);
         setEffectSuspended_l(FX_IID_NS, suspend, sessionId);
@@ -6568,19 +6570,19 @@
             for (size_t i = 0; i < mEffectChains.size(); i++) {
                 mEffectChains[i]->setDevice_l(value);
             }
+
             // store input device and output device but do not forward output device to audio HAL.
             // Note that status is ignored by the caller for output device
             // (see AudioFlinger::setParameters())
-            audio_devices_t newDevice = mDevice;
-            if (value & AUDIO_DEVICE_OUT_ALL) {
-                newDevice &= ~(value & AUDIO_DEVICE_OUT_ALL);
+            if (audio_is_output_devices(value)) {
+                mOutDevice = value;
                 status = BAD_VALUE;
             } else {
-                newDevice &= ~(value & AUDIO_DEVICE_IN_ALL);
+                mInDevice = value;
                 // disable AEC and NS if the device is a BT SCO headset supporting those pre processings
                 if (mTracks.size() > 0) {
-                    bool suspend = audio_is_bluetooth_sco_device(
-                            (audio_devices_t)value) && mAudioFlinger->btNrecIsOff();
+                    bool suspend = audio_is_bluetooth_sco_device(mInDevice) &&
+                                        mAudioFlinger->btNrecIsOff();
                     for (size_t i = 0; i < mTracks.size(); i++) {
                         sp<RecordTrack> track = mTracks[i];
                         setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId());
@@ -6588,8 +6590,15 @@
                     }
                 }
             }
-            newDevice |= value;
-            mDevice = newDevice;    // since mDevice is read by other threads, only write to it once
+        }
+        if (param.getInt(String8(AudioParameter::keyInputSource), value) == NO_ERROR &&
+                mAudioSource != (audio_source_t)value) {
+            // forward audio source change to effects that have requested to be
+            // aware of the audio source.
+            for (size_t i = 0; i < mEffectChains.size(); i++) {
+                mEffectChains[i]->setAudioSource_l((audio_source_t)value);
+            }
+            mAudioSource = (audio_source_t)value;
         }
         if (status == NO_ERROR) {
             status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string());
@@ -7322,7 +7331,7 @@
         return 0;
     }
 
-    return thread->device();
+    return thread->outDevice();
 }
 
 sp<AudioFlinger::SyncEvent> AudioFlinger::createSyncEvent(AudioSystem::sync_event_t type,
@@ -7735,8 +7744,10 @@
             }
             effectCreated = true;
 
-            effect->setDevice(mDevice);
+            effect->setDevice(mOutDevice);
+            effect->setDevice(mInDevice);
             effect->setMode(mAudioFlinger->getMode());
+            effect->setAudioSource(mAudioSource);
         }
         // create effect handle and connect it to effect module
         handle = new EffectHandle(effect, client, effectClient, priority);
@@ -7812,8 +7823,10 @@
         return status;
     }
 
-    effect->setDevice(mDevice);
+    effect->setDevice(mOutDevice);
+    effect->setDevice(mInDevice);
     effect->setMode(mAudioFlinger->getMode());
+    effect->setAudioSource(mAudioSource);
     return NO_ERROR;
 }
 
@@ -8650,44 +8663,23 @@
 
 status_t AudioFlinger::EffectModule::setDevice(audio_devices_t device)
 {
+    if (device == AUDIO_DEVICE_NONE) {
+        return NO_ERROR;
+    }
+
     Mutex::Autolock _l(mLock);
     status_t status = NO_ERROR;
     if (device && (mDescriptor.flags & EFFECT_FLAG_DEVICE_MASK) == EFFECT_FLAG_DEVICE_IND) {
-        // audio pre processing modules on RecordThread can receive both output and
-        // input device indication in the same call
-        audio_devices_t dev = device & AUDIO_DEVICE_OUT_ALL;
-        if (dev) {
-            status_t cmdStatus;
-            uint32_t size = sizeof(status_t);
-
-            status = (*mEffectInterface)->command(mEffectInterface,
-                                                  EFFECT_CMD_SET_DEVICE,
-                                                  sizeof(uint32_t),
-                                                  &dev,
-                                                  &size,
-                                                  &cmdStatus);
-            if (status == NO_ERROR) {
-                status = cmdStatus;
-            }
-        }
-        dev = device & AUDIO_DEVICE_IN_ALL;
-        if (dev) {
-            status_t cmdStatus;
-            uint32_t size = sizeof(status_t);
-
-            status_t status2 = (*mEffectInterface)->command(mEffectInterface,
-                                                  EFFECT_CMD_SET_INPUT_DEVICE,
-                                                  sizeof(uint32_t),
-                                                  &dev,
-                                                  &size,
-                                                  &cmdStatus);
-            if (status2 == NO_ERROR) {
-                status2 = cmdStatus;
-            }
-            if (status == NO_ERROR) {
-                status = status2;
-            }
-        }
+        status_t cmdStatus;
+        uint32_t size = sizeof(status_t);
+        uint32_t cmd = audio_is_output_devices(device) ? EFFECT_CMD_SET_DEVICE :
+                            EFFECT_CMD_SET_INPUT_DEVICE;
+        status = (*mEffectInterface)->command(mEffectInterface,
+                                              cmd,
+                                              sizeof(uint32_t),
+                                              &device,
+                                              &size,
+                                              &cmdStatus);
     }
     return status;
 }
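
Annotation: because AUDIO_DEVICE_NONE now returns early, callers can forward both directions unconditionally and exactly one of the two calls reaches the effect as a command. This is the calling pattern the thread code elsewhere in this patch adopts:

    effect->setDevice(mOutDevice);  // EFFECT_CMD_SET_DEVICE if an output device is set
    effect->setDevice(mInDevice);   // EFFECT_CMD_SET_INPUT_DEVICE if an input device is set
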
@@ -8712,6 +8704,22 @@
     return status;
 }
 
+status_t AudioFlinger::EffectModule::setAudioSource(audio_source_t source)
+{
+    Mutex::Autolock _l(mLock);
+    status_t status = NO_ERROR;
+    if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_SOURCE_MASK) == EFFECT_FLAG_AUDIO_SOURCE_IND) {
+        uint32_t size = 0;
+        status = (*mEffectInterface)->command(mEffectInterface,
+                                              EFFECT_CMD_SET_AUDIO_SOURCE,
+                                              sizeof(audio_source_t),
+                                              &source,
+                                              &size,
+                                              NULL);
+    }
+    return status;
+}
+
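Annotation: mAudioSource reaches a RecordThread through the keyInputSource parameter handled earlier in this patch, then fans out to the chain via setAudioSource_l(). A hedged sketch of the sending side, assuming the stock AudioParameter/AudioSystem helpers (ioHandle is a placeholder for the record thread's I/O handle):

    AudioParameter param;
    param.addInt(String8(AudioParameter::keyInputSource),
                 (int)AUDIO_SOURCE_VOICE_RECOGNITION);  // example source
    AudioSystem::setParameters(ioHandle, param.toString());
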
 void AudioFlinger::EffectModule::setSuspended(bool suspended)
 {
     Mutex::Autolock _l(mLock);
@@ -9387,6 +9395,15 @@
     }
 }
 
+// setAudioSource_l() must be called with PlaybackThread::mLock held
+void AudioFlinger::EffectChain::setAudioSource_l(audio_source_t source)
+{
+    size_t size = mEffects.size();
+    for (size_t i = 0; i < size; i++) {
+        mEffects[i]->setAudioSource(source);
+    }
+}
+
 // setVolume_l() must be called with PlaybackThread::mLock held
 bool AudioFlinger::EffectChain::setVolume_l(uint32_t *left, uint32_t *right)
 {
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 4723cd9..5ffa5a6 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -353,7 +353,8 @@
             RECORD              // Thread class is RecordThread
         };
 
-        ThreadBase (const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id, audio_devices_t device, type_t type);
+        ThreadBase (const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+                    audio_devices_t outDevice, audio_devices_t inDevice, type_t type);
         virtual             ~ThreadBase();
 
         void dumpBase(int fd, const Vector<String16>& args);
@@ -519,9 +520,10 @@
                     void        sendConfigEvent_l(int event, int param = 0);
                     void        processConfigEvents();
 
-                    // see note at declaration of mStandby and mDevice
+                    // see note at declaration of mStandby, mOutDevice and mInDevice
                     bool        standby() const { return mStandby; }
-                    audio_devices_t device() const { return mDevice; }
+                    audio_devices_t outDevice() const { return mOutDevice; }
+                    audio_devices_t inDevice() const { return mInDevice; }
 
         virtual     audio_stream_t* stream() const = 0;
 
@@ -667,13 +669,15 @@
                     Vector<ConfigEvent>     mConfigEvents;
 
                     // These fields are written and read by thread itself without lock or barrier,
-                    // and read by other threads without lock or barrier via standby() and device().
+                    // and read by other threads without lock or barrier via standby(), outDevice()
+                    // and inDevice().
                     // Because of the absence of a lock or barrier, any other thread that reads
                     // these fields must use the information in isolation, or be prepared to deal
                     // with possibility that it might be inconsistent with other information.
                     bool                    mStandby;   // Whether thread is currently in standby.
-                    audio_devices_t         mDevice;    // output device for PlaybackThread
-                                                        // input + output devices for RecordThread
+                    audio_devices_t         mOutDevice;   // output device
+                    audio_devices_t         mInDevice;    // input device
+                    audio_source_t          mAudioSource; // (see audio.h, audio_source_t)
 
                     const audio_io_handle_t mId;
                     Vector< sp<EffectChain> > mEffectChains;
@@ -1606,6 +1610,7 @@
         status_t         setDevice(audio_devices_t device);
         status_t         setVolume(uint32_t *left, uint32_t *right, bool controller);
         status_t         setMode(audio_mode_t mode);
+        status_t         setAudioSource(audio_source_t source);
         status_t         start();
         status_t         stop();
         void             setSuspended(bool suspended);
@@ -1768,6 +1773,7 @@
         bool setVolume_l(uint32_t *left, uint32_t *right);
         void setDevice_l(audio_devices_t device);
         void setMode_l(audio_mode_t mode);
+        void setAudioSource_l(audio_source_t source);
 
         void setInBuffer(int16_t *buffer, bool ownsBuffer = false) {
             mInBuffer = buffer;
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 1370c62..c7927fe 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -14,8 +14,12 @@
     camera2/CameraMetadata.cpp \
     camera2/Parameters.cpp \
     camera2/FrameProcessor.cpp \
-    camera2/CaptureProcessor.cpp \
-    camera2/CallbackProcessor.cpp
+    camera2/JpegProcessor.cpp \
+    camera2/CallbackProcessor.cpp \
+    camera2/ZslProcessor.cpp \
+    camera2/BurstCapture.cpp \
+    camera2/JpegCompressor.cpp \
+    camera2/CaptureSequencer.cpp
 
 LOCAL_SHARED_LIBRARIES:= \
     libui \
@@ -28,10 +32,12 @@
     libgui \
     libhardware \
     libsync \
-    libcamera_metadata
+    libcamera_metadata \
+    libjpeg
 
 LOCAL_C_INCLUDES += \
-    system/media/camera/include
+    system/media/camera/include \
+    external/jpeg
 
 LOCAL_MODULE:= libcameraservice
 
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index acd290d..90355be 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -25,14 +25,13 @@
 #include <gui/SurfaceTextureClient.h>
 #include <gui/Surface.h>
 #include <media/hardware/MetadataBufferType.h>
-
+#include "camera2/Parameters.h"
 #include "Camera2Client.h"
 
 #define ALOG1(...) ALOGD_IF(gLogLevel >= 1, __VA_ARGS__);
 #define ALOG2(...) ALOGD_IF(gLogLevel >= 2, __VA_ARGS__);
 
 namespace android {
-
 using namespace camera2;
 
 static int getCallingPid() {
@@ -59,12 +58,21 @@
         mRecordingHeapCount(kDefaultRecordingHeapCount)
 {
     ATRACE_CALL();
-    ALOGV("%s: Created client for camera %d", __FUNCTION__, cameraId);
+    ALOGI("Camera %d: Opened", cameraId);
 
     mDevice = new Camera2Device(cameraId);
 
     SharedParameters::Lock l(mParameters);
     l.mParameters.state = Parameters::DISCONNECTED;
+
+    char value[PROPERTY_VALUE_MAX];
+    property_get("camera.zsl_mode", value, "0");
+    if (!strcmp(value,"1")) {
+        ALOGI("Camera %d: Enabling ZSL mode", cameraId);
+        l.mParameters.zslMode = true;
+    } else {
+        l.mParameters.zslMode = false;
+    }
 }
 
 status_t Camera2Client::checkPid(const char* checkLocation) const {
@@ -100,20 +108,32 @@
         return NO_INIT;
     }
 
-    mFrameProcessor = new FrameProcessor(this);
-    String8 frameThreadName = String8::format("Camera2Client[%d]::FrameProcessor",
-            mCameraId);
-    mFrameProcessor->run(frameThreadName.string());
+    String8 threadName;
 
-    mCaptureProcessor = new CaptureProcessor(this);
-    String8 captureThreadName =
-            String8::format("Camera2Client[%d]::CaptureProcessor", mCameraId);
-    mCaptureProcessor->run(captureThreadName.string());
+    mFrameProcessor = new FrameProcessor(this);
+    threadName = String8::format("Camera2Client[%d]::FrameProcessor",
+            mCameraId);
+    mFrameProcessor->run(threadName.string());
+
+    mCaptureSequencer = new CaptureSequencer(this);
+    threadName = String8::format("Camera2Client[%d]::CaptureSequencer",
+            mCameraId);
+    mCaptureSequencer->run(threadName.string());
+
+    mJpegProcessor = new JpegProcessor(this, mCaptureSequencer);
+    threadName = String8::format("Camera2Client[%d]::JpegProcessor",
+            mCameraId);
+    mJpegProcessor->run(threadName.string());
+
+    mZslProcessor = new ZslProcessor(this, mCaptureSequencer);
+    threadName = String8::format("Camera2Client[%d]::ZslProcessor",
+            mCameraId);
+    mZslProcessor->run(threadName.string());
 
     mCallbackProcessor = new CallbackProcessor(this);
-    String8 callbackThreadName =
-            String8::format("Camera2Client[%d]::CallbackProcessor", mCameraId);
-    mCallbackProcessor->run(callbackThreadName.string());
+    threadName = String8::format("Camera2Client[%d]::CallbackProcessor",
+            mCameraId);
+    mCallbackProcessor->run(threadName.string());
 
     if (gLogLevel >= 1) {
         ALOGD("%s: Default parameters converted from camera %d:", __FUNCTION__,
@@ -126,7 +146,7 @@
 
 Camera2Client::~Camera2Client() {
     ATRACE_CALL();
-    ALOGV("%s: Camera %d: Shutting down client.", __FUNCTION__, mCameraId);
+    ALOGV("Camera %d: Shutting down", mCameraId);
 
     mDestructionStarted = true;
 
@@ -135,7 +155,12 @@
     disconnect();
 
     mFrameProcessor->requestExit();
-    ALOGV("%s: Camera %d: Shutdown complete", __FUNCTION__, mCameraId);
+    mCaptureSequencer->requestExit();
+    mJpegProcessor->requestExit();
+    mZslProcessor->requestExit();
+    mCallbackProcessor->requestExit();
+
+    ALOGI("Camera %d: Closed", mCameraId);
 }
 
 status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
@@ -299,10 +324,12 @@
             p.videoStabilization ? "enabled" : "disabled");
 
     result.append("  Current streams:\n");
-    result.appendFormat("    Preview stream ID: %d\n", mPreviewStreamId);
+    result.appendFormat("    Preview stream ID: %d\n",
+            getPreviewStreamId());
     result.appendFormat("    Capture stream ID: %d\n",
-            mCaptureProcessor->getStreamId());
-    result.appendFormat("    Recording stream ID: %d\n", mRecordingStreamId);
+            getCaptureStreamId());
+    result.appendFormat("    Recording stream ID: %d\n",
+            getRecordingStreamId());
 
     result.append("  Current requests:\n");
     if (mPreviewRequest.entryCount() != 0) {
@@ -314,15 +341,6 @@
         write(fd, result.string(), result.size());
     }
 
-    if (mCaptureRequest.entryCount() != 0) {
-        result = "    Capture request:\n";
-        write(fd, result.string(), result.size());
-        mCaptureRequest.dump(fd, 2, 6);
-    } else {
-        result = "    Capture request: undefined\n";
-        write(fd, result.string(), result.size());
-    }
-
     if (mRecordingRequest.entryCount() != 0) {
         result = "    Recording request:\n";
         write(fd, result.string(), result.size());
@@ -332,6 +350,8 @@
         write(fd, result.string(), result.size());
     }
 
+    mCaptureSequencer->dump(fd, args);
+
     mFrameProcessor->dump(fd, args);
 
     result = "  Device dump:\n";
@@ -366,7 +386,7 @@
         mPreviewStreamId = NO_STREAM;
     }
 
-    mCaptureProcessor->deleteStream();
+    mJpegProcessor->deleteStream();
 
     if (mRecordingStreamId != NO_STREAM) {
         mDevice->deleteStream(mRecordingStreamId);
@@ -375,6 +395,8 @@
 
     mCallbackProcessor->deleteStream();
 
+    mZslProcessor->deleteStream();
+
     mDevice.clear();
     SharedParameters::Lock l(mParameters);
     l.mParameters.state = Parameters::DISCONNECTED;
@@ -623,41 +645,86 @@
             return res;
         }
     }
-
-    if (mPreviewRequest.entryCount() == 0) {
-        res = updatePreviewRequest(params);
+    if (params.zslMode && !params.recordingHint) {
+        res = mZslProcessor->updateStream(params);
         if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to create preview request: %s (%d)",
+            ALOGE("%s: Camera %d: Unable to update ZSL stream: %s (%d)",
                     __FUNCTION__, mCameraId, strerror(-res), res);
             return res;
         }
     }
 
-    if (callbacksEnabled) {
-        uint8_t outputStreams[2] =
-                { mPreviewStreamId, mCallbackProcessor->getStreamId() };
-        res = mPreviewRequest.update(
-                ANDROID_REQUEST_OUTPUT_STREAMS,
-                outputStreams, 2);
+    CameraMetadata *request;
+    if (!params.recordingHint) {
+        if (mPreviewRequest.entryCount() == 0) {
+            res = updatePreviewRequest(params);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to create preview request: %s (%d)",
+                        __FUNCTION__, mCameraId, strerror(-res), res);
+                return res;
+            }
+        }
+        request = &mPreviewRequest;
     } else {
-        uint8_t outputStreams[1] = { mPreviewStreamId };
-        res = mPreviewRequest.update(
-                ANDROID_REQUEST_OUTPUT_STREAMS,
-                outputStreams, 1);
+        // With recording hint set, we're going to be operating under the
+        // assumption that the user will record video. To optimize recording
+        // startup time, create the necessary output streams for recording and
+        // video snapshot now if they don't already exist.
+        if (mRecordingRequest.entryCount() == 0) {
+            res = updateRecordingRequest(params);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to create recording preview "
+                        "request: %s (%d)",
+                        __FUNCTION__, mCameraId, strerror(-res), res);
+                return res;
+            }
+        }
+        request = &mRecordingRequest;
+
+        res = updateRecordingStream(params);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to pre-configure recording "
+                    "stream: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
+
+        res = mJpegProcessor->updateStream(params);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't pre-configure still image "
+                    "stream: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
     }
+
+    Vector<uint8_t> outputStreams;
+    outputStreams.push(getPreviewStreamId());
+
+    if (callbacksEnabled) {
+        outputStreams.push(getCallbackStreamId());
+    }
+    if (params.zslMode && !params.recordingHint) {
+        outputStreams.push(getZslStreamId());
+    }
+
+    res = request->update(
+        ANDROID_REQUEST_OUTPUT_STREAMS,
+        outputStreams);
+
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to set up preview request: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
-    res = mPreviewRequest.sort();
+    res = request->sort();
     if (res != OK) {
         ALOGE("%s: Camera %d: Error sorting preview request: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
 
-    res = mDevice->setStreamingRequest(mPreviewRequest);
+    res = mDevice->setStreamingRequest(*request);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to set preview request to start preview: "
                 "%s (%d)",
@@ -790,6 +857,8 @@
         return INVALID_OPERATION;
     }
 
+    mCameraService->playSound(CameraService::SOUND_RECORDING);
+
     res = updateRecordingStream(params);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)",
@@ -817,14 +886,19 @@
     }
 
     if (callbacksEnabled) {
-        uint8_t outputStreams[3] =
-                { mPreviewStreamId, mRecordingStreamId,
-                  mCallbackProcessor->getStreamId() };
+        uint8_t outputStreams[3] ={
+            getPreviewStreamId(),
+            getRecordingStreamId(),
+            getCallbackStreamId()
+        };
         res = mRecordingRequest.update(
                 ANDROID_REQUEST_OUTPUT_STREAMS,
                 outputStreams, 3);
     } else {
-        uint8_t outputStreams[2] = { mPreviewStreamId, mRecordingStreamId };
+        uint8_t outputStreams[2] = {
+            getPreviewStreamId(),
+            getRecordingStreamId()
+        };
         res = mRecordingRequest.update(
                 ANDROID_REQUEST_OUTPUT_STREAMS,
                 outputStreams, 2);
@@ -879,19 +953,13 @@
             return;
     };
 
-    // Back to preview. Since record can only be reached through preview,
-    // all preview stream setup should be up to date.
-    res = mDevice->setStreamingRequest(mPreviewRequest);
+    mCameraService->playSound(CameraService::SOUND_RECORDING);
+
+    res = startPreviewL(l.mParameters, true);
     if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to switch back to preview request: "
-                "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-        return;
+        ALOGE("%s: Camera %d: Unable to return to preview",
+                __FUNCTION__, mCameraId);
     }
-
-    // TODO: Should recording heap be freed? Can't do it yet since requests
-    // could still be in flight.
-
-    l.mParameters.state = Parameters::PREVIEW;
 }
 
 bool Camera2Client::recordingEnabled() {
@@ -1020,8 +1088,18 @@
                     __FUNCTION__, mCameraId);
             return INVALID_OPERATION;
         case Parameters::PREVIEW:
-        case Parameters::RECORD:
             // Good to go for takePicture
+            res = commandStopFaceDetectionL(l.mParameters);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to stop face detection for still capture",
+                        __FUNCTION__, mCameraId);
+                return res;
+            }
+            l.mParameters.state = Parameters::STILL_CAPTURE;
+            break;
+        case Parameters::RECORD:
+            // Good to go for video snapshot
+            l.mParameters.state = Parameters::VIDEO_SNAPSHOT;
             break;
         case Parameters::STILL_CAPTURE:
         case Parameters::VIDEO_SNAPSHOT:
@@ -1032,130 +1110,20 @@
 
     ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId);
 
-    res = mCaptureProcessor->updateStream(l.mParameters);
+    res = mJpegProcessor->updateStream(l.mParameters);
     if (res != OK) {
         ALOGE("%s: Camera %d: Can't set up still image stream: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
 
-    if (mCaptureRequest.entryCount() == 0) {
-        res = updateCaptureRequest(l.mParameters);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Can't create still image capture request: "
-                    "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-    }
-
-    bool callbacksEnabled = l.mParameters.previewCallbackFlags &
-            CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
-    bool recordingEnabled = (l.mParameters.state == Parameters::RECORD);
-
-    int captureStreamId = mCaptureProcessor->getStreamId();
-
-    int streamSwitch = (callbacksEnabled ? 0x2 : 0x0) +
-            (recordingEnabled ? 0x1 : 0x0);
-    switch ( streamSwitch ) {
-        case 0: { // No recording, callbacks
-            uint8_t streamIds[2] = {
-                mPreviewStreamId,
-                captureStreamId
-            };
-            res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
-                    streamIds, 2);
-            break;
-        }
-        case 1: { // Recording
-            uint8_t streamIds[3] = {
-                mPreviewStreamId,
-                mRecordingStreamId,
-                captureStreamId
-            };
-            res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
-                    streamIds, 3);
-            break;
-        }
-        case 2: { // Callbacks
-            uint8_t streamIds[3] = {
-                mPreviewStreamId,
-                mCallbackProcessor->getStreamId(),
-                captureStreamId
-            };
-            res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
-                    streamIds, 3);
-            break;
-        }
-        case 3: { // Both
-            uint8_t streamIds[4] = {
-                mPreviewStreamId,
-                mCallbackProcessor->getStreamId(),
-                mRecordingStreamId,
-                captureStreamId
-            };
-            res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
-                    streamIds, 4);
-            break;
-        }
-    };
+    res = mCaptureSequencer->startCapture();
     if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to set up still image capture request: "
-                "%s (%d)",
+        ALOGE("%s: Camera %d: Unable to start capture: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
-    }
-    res = mCaptureRequest.sort();
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to sort capture request: %s (%d)",
-                __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
     }
 
-    CameraMetadata captureCopy = mCaptureRequest;
-    if (captureCopy.entryCount() == 0) {
-        ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
-                __FUNCTION__, mCameraId);
-        return NO_MEMORY;
-    }
-
-    if (l.mParameters.state == Parameters::PREVIEW) {
-        res = mDevice->clearStreamingRequest();
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to stop preview for still capture: "
-                    "%s (%d)",
-                    __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-    }
-    // TODO: Capture should be atomic with setStreamingRequest here
-    res = mDevice->capture(captureCopy);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to submit still image capture request: "
-                "%s (%d)",
-                __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
-    }
-
-    switch (l.mParameters.state) {
-        case Parameters::PREVIEW:
-            l.mParameters.state = Parameters::STILL_CAPTURE;
-            res = commandStopFaceDetectionL(l.mParameters);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Unable to stop face detection for still capture",
-                        __FUNCTION__, mCameraId);
-                return res;
-            }
-            break;
-        case Parameters::RECORD:
-            l.mParameters.state = Parameters::VIDEO_SNAPSHOT;
-            break;
-        default:
-            ALOGE("%s: Camera %d: Unknown state for still capture!",
-                    __FUNCTION__, mCameraId);
-            return INVALID_OPERATION;
-    }
-
-    return OK;
+    return res;
 }
 
 status_t Camera2Client::setParameters(const String8& params) {
@@ -1501,6 +1469,7 @@
 void Camera2Client::notifyAutoExposure(uint8_t newState, int triggerId) {
     ALOGV("%s: Autoexposure state now %d, last trigger %d",
             __FUNCTION__, newState, triggerId);
+    mCaptureSequencer->notifyAutoExposure(newState, triggerId);
 }
 
 void Camera2Client::notifyAutoWhitebalance(uint8_t newState, int triggerId) {
@@ -1508,7 +1477,7 @@
             __FUNCTION__, newState, triggerId);
 }
 
-int Camera2Client::getCameraId() {
+int Camera2Client::getCameraId() const {
     return mCameraId;
 }
 
@@ -1516,10 +1485,43 @@
     return mDevice;
 }
 
+const sp<CameraService>& Camera2Client::getCameraService() {
+    return mCameraService;
+}
+
 camera2::SharedParameters& Camera2Client::getParameters() {
     return mParameters;
 }
 
+int Camera2Client::getPreviewStreamId() const {
+    return mPreviewStreamId;
+}
+
+int Camera2Client::getCaptureStreamId() const {
+    return mJpegProcessor->getStreamId();
+}
+
+int Camera2Client::getCallbackStreamId() const {
+    return mCallbackProcessor->getStreamId();
+}
+
+int Camera2Client::getRecordingStreamId() const {
+    return mRecordingStreamId;
+}
+
+int Camera2Client::getZslStreamId() const {
+    return mZslProcessor->getStreamId();
+}
+
+status_t Camera2Client::registerFrameListener(int32_t id,
+        wp<camera2::FrameProcessor::FilteredListener> listener) {
+    return mFrameProcessor->registerListener(id, listener);
+}
+
+status_t Camera2Client::removeFrameListener(int32_t id) {
+    return mFrameProcessor->removeListener(id);
+}
+
 Camera2Client::SharedCameraClient::Lock::Lock(SharedCameraClient &client):
         mCameraClient(client.mCameraClient),
         mSharedClient(client) {
@@ -1546,6 +1548,10 @@
     mCameraClient.clear();
 }
 
+const int32_t Camera2Client::kPreviewRequestId;
+const int32_t Camera2Client::kRecordRequestId;
+const int32_t Camera2Client::kFirstCaptureRequestId;
+
 void Camera2Client::onRecordingFrameAvailable() {
     ATRACE_CALL();
     status_t res;
@@ -1647,7 +1653,7 @@
 
 /** Utility methods */
 
-status_t Camera2Client::updateRequests(const Parameters &params) {
+status_t Camera2Client::updateRequests(Parameters &params) {
     status_t res;
 
     res = updatePreviewRequest(params);
@@ -1656,13 +1662,6 @@
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
-    res = updateCaptureRequest(params);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to update capture request: %s (%d)",
-                __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
-    }
-
     res = updateRecordingRequest(params);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to update recording request: %s (%d)",
@@ -1671,7 +1670,7 @@
     }
 
     if (params.state == Parameters::PREVIEW) {
-        res = mDevice->setStreamingRequest(mPreviewRequest);
+        res = startPreviewL(params, true);
         if (res != OK) {
             ALOGE("%s: Camera %d: Error streaming new preview request: %s (%d)",
                     __FUNCTION__, mCameraId, strerror(-res), res);
@@ -1761,7 +1760,7 @@
         }
     }
 
-    res = updateRequestCommon(&mPreviewRequest, params);
+    res = params.updateRequest(&mPreviewRequest);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to update common entries of preview "
                 "request: %s (%d)", __FUNCTION__, mCameraId,
@@ -1769,65 +1768,8 @@
         return res;
     }
 
-    return OK;
-}
-
-status_t Camera2Client::updateCaptureRequest(const Parameters &params) {
-    ATRACE_CALL();
-    status_t res;
-    if (mCaptureRequest.entryCount() == 0) {
-        res = mDevice->createDefaultRequest(CAMERA2_TEMPLATE_STILL_CAPTURE,
-                &mCaptureRequest);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to create default still image request:"
-                    " %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-    }
-
-    res = updateRequestCommon(&mCaptureRequest, params);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to update common entries of capture "
-                "request: %s (%d)", __FUNCTION__, mCameraId,
-                strerror(-res), res);
-        return res;
-    }
-
-    res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_SIZE,
-            params.jpegThumbSize, 2);
-    if (res != OK) return res;
-    res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_QUALITY,
-            &params.jpegThumbQuality, 1);
-    if (res != OK) return res;
-    res = mCaptureRequest.update(ANDROID_JPEG_QUALITY,
-            &params.jpegQuality, 1);
-    if (res != OK) return res;
-    res = mCaptureRequest.update(
-            ANDROID_JPEG_ORIENTATION,
-            &params.jpegRotation, 1);
-    if (res != OK) return res;
-
-    if (params.gpsEnabled) {
-        res = mCaptureRequest.update(
-                ANDROID_JPEG_GPS_COORDINATES,
-                params.gpsCoordinates, 3);
-        if (res != OK) return res;
-        res = mCaptureRequest.update(
-                ANDROID_JPEG_GPS_TIMESTAMP,
-                &params.gpsTimestamp, 1);
-        if (res != OK) return res;
-        res = mCaptureRequest.update(
-                ANDROID_JPEG_GPS_PROCESSING_METHOD,
-                params.gpsProcessingMethod);
-        if (res != OK) return res;
-    } else {
-        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_COORDINATES);
-        if (res != OK) return res;
-        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_TIMESTAMP);
-        if (res != OK) return res;
-        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_PROCESSING_METHOD);
-        if (res != OK) return res;
-    }
+    res = mPreviewRequest.update(ANDROID_REQUEST_ID,
+            &kPreviewRequestId, 1);
 
     return OK;
 }
@@ -1845,7 +1787,7 @@
         }
     }
 
-    res = updateRequestCommon(&mRecordingRequest, params);
+    res = params.updateRequest(&mRecordingRequest);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to update common entries of recording "
                 "request: %s (%d)", __FUNCTION__, mCameraId,
@@ -1913,197 +1855,6 @@
     return OK;
 }
 
-status_t Camera2Client::updateRequestCommon(CameraMetadata *request,
-        const Parameters &params) const {
-    ATRACE_CALL();
-    status_t res;
-    res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
-            params.previewFpsRange, 2);
-    if (res != OK) return res;
-
-    uint8_t wbMode = params.autoWhiteBalanceLock ?
-            (uint8_t)ANDROID_CONTROL_AWB_LOCKED : params.wbMode;
-    res = request->update(ANDROID_CONTROL_AWB_MODE,
-            &wbMode, 1);
-    if (res != OK) return res;
-    res = request->update(ANDROID_CONTROL_EFFECT_MODE,
-            &params.effectMode, 1);
-    if (res != OK) return res;
-    res = request->update(ANDROID_CONTROL_AE_ANTIBANDING_MODE,
-            &params.antibandingMode, 1);
-    if (res != OK) return res;
-
-    uint8_t controlMode =
-            (params.sceneMode == ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) ?
-            ANDROID_CONTROL_AUTO : ANDROID_CONTROL_USE_SCENE_MODE;
-    res = request->update(ANDROID_CONTROL_MODE,
-            &controlMode, 1);
-    if (res != OK) return res;
-    if (controlMode == ANDROID_CONTROL_USE_SCENE_MODE) {
-        res = request->update(ANDROID_CONTROL_SCENE_MODE,
-                &params.sceneMode, 1);
-        if (res != OK) return res;
-    }
-
-    uint8_t flashMode = ANDROID_FLASH_OFF;
-    uint8_t aeMode;
-    switch (params.flashMode) {
-        case Parameters::FLASH_MODE_OFF:
-            aeMode = ANDROID_CONTROL_AE_ON; break;
-        case Parameters::FLASH_MODE_AUTO:
-            aeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH; break;
-        case Parameters::FLASH_MODE_ON:
-            aeMode = ANDROID_CONTROL_AE_ON_ALWAYS_FLASH; break;
-        case Parameters::FLASH_MODE_TORCH:
-            aeMode = ANDROID_CONTROL_AE_ON;
-            flashMode = ANDROID_FLASH_TORCH;
-            break;
-        case Parameters::FLASH_MODE_RED_EYE:
-            aeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH_REDEYE; break;
-        default:
-            ALOGE("%s: Camera %d: Unknown flash mode %d", __FUNCTION__,
-                    mCameraId, params.flashMode);
-            return BAD_VALUE;
-    }
-    if (params.autoExposureLock) aeMode = ANDROID_CONTROL_AE_LOCKED;
-
-    res = request->update(ANDROID_FLASH_MODE,
-            &flashMode, 1);
-    if (res != OK) return res;
-    res = request->update(ANDROID_CONTROL_AE_MODE,
-            &aeMode, 1);
-    if (res != OK) return res;
-
-    float focusDistance = 0; // infinity focus in diopters
-    uint8_t focusMode;
-    switch (params.focusMode) {
-        case Parameters::FOCUS_MODE_AUTO:
-        case Parameters::FOCUS_MODE_MACRO:
-        case Parameters::FOCUS_MODE_CONTINUOUS_VIDEO:
-        case Parameters::FOCUS_MODE_CONTINUOUS_PICTURE:
-        case Parameters::FOCUS_MODE_EDOF:
-            focusMode = params.focusMode;
-            break;
-        case Parameters::FOCUS_MODE_INFINITY:
-        case Parameters::FOCUS_MODE_FIXED:
-            focusMode = ANDROID_CONTROL_AF_OFF;
-            break;
-        default:
-            ALOGE("%s: Camera %d: Unknown focus mode %d", __FUNCTION__,
-                    mCameraId, params.focusMode);
-            return BAD_VALUE;
-    }
-    res = request->update(ANDROID_LENS_FOCUS_DISTANCE,
-            &focusDistance, 1);
-    if (res != OK) return res;
-    res = request->update(ANDROID_CONTROL_AF_MODE,
-            &focusMode, 1);
-    if (res != OK) return res;
-
-    size_t focusingAreasSize = params.focusingAreas.size() * 5;
-    int32_t *focusingAreas = new int32_t[focusingAreasSize];
-    for (size_t i = 0; i < focusingAreasSize; i += 5) {
-        if (params.focusingAreas[i].weight != 0) {
-            focusingAreas[i + 0] =
-                    params.normalizedXToArray(params.focusingAreas[i].left);
-            focusingAreas[i + 1] =
-                    params.normalizedYToArray(params.focusingAreas[i].top);
-            focusingAreas[i + 2] =
-                    params.normalizedXToArray(params.focusingAreas[i].right);
-            focusingAreas[i + 3] =
-                    params.normalizedYToArray(params.focusingAreas[i].bottom);
-        } else {
-            focusingAreas[i + 0] = 0;
-            focusingAreas[i + 1] = 0;
-            focusingAreas[i + 2] = 0;
-            focusingAreas[i + 3] = 0;
-        }
-        focusingAreas[i + 4] = params.focusingAreas[i].weight;
-    }
-    res = request->update(ANDROID_CONTROL_AF_REGIONS,
-            focusingAreas,focusingAreasSize);
-    if (res != OK) return res;
-    delete[] focusingAreas;
-
-    res = request->update(ANDROID_CONTROL_AE_EXP_COMPENSATION,
-            &params.exposureCompensation, 1);
-    if (res != OK) return res;
-
-    size_t meteringAreasSize = params.meteringAreas.size() * 5;
-    int32_t *meteringAreas = new int32_t[meteringAreasSize];
-    for (size_t i = 0; i < meteringAreasSize; i += 5) {
-        if (params.meteringAreas[i].weight != 0) {
-            meteringAreas[i + 0] =
-                params.normalizedXToArray(params.meteringAreas[i].left);
-            meteringAreas[i + 1] =
-                params.normalizedYToArray(params.meteringAreas[i].top);
-            meteringAreas[i + 2] =
-                params.normalizedXToArray(params.meteringAreas[i].right);
-            meteringAreas[i + 3] =
-                params.normalizedYToArray(params.meteringAreas[i].bottom);
-        } else {
-            meteringAreas[i + 0] = 0;
-            meteringAreas[i + 1] = 0;
-            meteringAreas[i + 2] = 0;
-            meteringAreas[i + 3] = 0;
-        }
-        meteringAreas[i + 4] = params.meteringAreas[i].weight;
-    }
-    res = request->update(ANDROID_CONTROL_AE_REGIONS,
-            meteringAreas, meteringAreasSize);
-    if (res != OK) return res;
-
-    res = request->update(ANDROID_CONTROL_AWB_REGIONS,
-            meteringAreas, meteringAreasSize);
-    if (res != OK) return res;
-    delete[] meteringAreas;
-
-    // Need to convert zoom index into a crop rectangle. The rectangle is
-    // chosen to maximize its area on the sensor
-
-    camera_metadata_ro_entry_t maxDigitalZoom =
-            mParameters.staticInfo(ANDROID_SCALER_AVAILABLE_MAX_ZOOM);
-    float zoomIncrement = (maxDigitalZoom.data.f[0] - 1) /
-            (params.NUM_ZOOM_STEPS-1);
-    float zoomRatio = 1 + zoomIncrement * params.zoom;
-
-    float zoomLeft, zoomTop, zoomWidth, zoomHeight;
-    if (params.previewWidth >= params.previewHeight) {
-        zoomWidth =  params.fastInfo.arrayWidth / zoomRatio;
-        zoomHeight = zoomWidth *
-                params.previewHeight / params.previewWidth;
-    } else {
-        zoomHeight = params.fastInfo.arrayHeight / zoomRatio;
-        zoomWidth = zoomHeight *
-                params.previewWidth / params.previewHeight;
-    }
-    zoomLeft = (params.fastInfo.arrayWidth - zoomWidth) / 2;
-    zoomTop = (params.fastInfo.arrayHeight - zoomHeight) / 2;
-
-    int32_t cropRegion[3] = { zoomLeft, zoomTop, zoomWidth };
-    res = request->update(ANDROID_SCALER_CROP_REGION,
-            cropRegion, 3);
-    if (res != OK) return res;
-
-    // TODO: Decide how to map recordingHint, or whether just to ignore it
-
-    uint8_t vstabMode = params.videoStabilization ?
-            ANDROID_CONTROL_VIDEO_STABILIZATION_ON :
-            ANDROID_CONTROL_VIDEO_STABILIZATION_OFF;
-    res = request->update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
-            &vstabMode, 1);
-    if (res != OK) return res;
-
-    uint8_t faceDetectMode = params.enableFaceDetect ?
-            params.fastInfo.bestFaceDetectMode :
-            (uint8_t)ANDROID_STATS_FACE_DETECTION_OFF;
-    res = request->update(ANDROID_STATS_FACE_DETECT_MODE,
-            &faceDetectMode, 1);
-    if (res != OK) return res;
-
-    return OK;
-}
-
 size_t Camera2Client::calculateBufferSize(int width, int height,
         int format, int stride) {
     switch (format) {
diff --git a/services/camera/libcameraservice/Camera2Client.h b/services/camera/libcameraservice/Camera2Client.h
index b2fd636..1eb024a 100644
--- a/services/camera/libcameraservice/Camera2Client.h
+++ b/services/camera/libcameraservice/Camera2Client.h
@@ -21,7 +21,9 @@
 #include "CameraService.h"
 #include "camera2/Parameters.h"
 #include "camera2/FrameProcessor.h"
-#include "camera2/CaptureProcessor.h"
+#include "camera2/JpegProcessor.h"
+#include "camera2/ZslProcessor.h"
+#include "camera2/CaptureSequencer.h"
 #include "camera2/CallbackProcessor.h"
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
@@ -95,10 +97,21 @@
      * Interface used by independent components of Camera2Client.
      */
 
-    int getCameraId();
+    int getCameraId() const;
     const sp<Camera2Device>& getCameraDevice();
+    const sp<CameraService>& getCameraService();
     camera2::SharedParameters& getParameters();
 
+    int getPreviewStreamId() const;
+    int getCaptureStreamId() const;
+    int getCallbackStreamId() const;
+    int getRecordingStreamId() const;
+    int getZslStreamId() const;
+
+    status_t registerFrameListener(int32_t id,
+            wp<camera2::FrameProcessor::FilteredListener> listener);
+    status_t removeFrameListener(int32_t id);
+
     // Simple class to ensure that access to ICameraClient is serialized by
     // requiring mCameraClientLock to be locked before access to mCameraClient
     // is possible.
@@ -123,6 +136,10 @@
     static size_t calculateBufferSize(int width, int height,
             int format, int stride);
 
+    static const int32_t kPreviewRequestId = 1000;
+    static const int32_t kRecordRequestId  = 2000;
+    static const int32_t kFirstCaptureRequestId = 3000;
+
 private:
     /** ICamera interface-related private members */
 
@@ -160,7 +177,7 @@
     /** Camera device-related private members */
 
     void     setPreviewCallbackFlagL(Parameters &params, int flag);
-    status_t updateRequests(const Parameters &params);
+    status_t updateRequests(Parameters &params);
 
     // Used with stream IDs
     static const int NO_STREAM = -1;
@@ -183,9 +200,9 @@
 
     /* Still image capture related members */
 
-    sp<camera2::CaptureProcessor> mCaptureProcessor;
-    CameraMetadata mCaptureRequest;
-    status_t updateCaptureRequest(const Parameters &params);
+    sp<camera2::CaptureSequencer> mCaptureSequencer;
+    sp<camera2::JpegProcessor> mJpegProcessor;
+    sp<camera2::ZslProcessor> mZslProcessor;
 
     /* Recording related members */
 
@@ -228,18 +245,6 @@
 
     // Verify that caller is the owner of the camera
     status_t checkPid(const char *checkLocation) const;
-
-    // Update parameters all requests use, based on mParameters
-    status_t updateRequestCommon(CameraMetadata *request, const Parameters &params) const;
-
-    // Map from sensor active array pixel coordinates to normalized camera
-    // parameter coordinates. The former are (0,0)-(array width - 1, array height
-    // - 1), the latter from (-1000,-1000)-(1000,1000)
-    int normalizedXToArray(int x) const;
-    int normalizedYToArray(int y) const;
-    int arrayXToNormalized(int width) const;
-    int arrayYToNormalized(int height) const;
-
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/Camera2Device.cpp b/services/camera/libcameraservice/Camera2Device.cpp
index f62c0a0..a171c46 100644
--- a/services/camera/libcameraservice/Camera2Device.cpp
+++ b/services/camera/libcameraservice/Camera2Device.cpp
@@ -206,6 +206,42 @@
     return OK;
 }
 
+status_t Camera2Device::createReprocessStreamFromStream(int outputId, int *id) {
+    status_t res;
+    ALOGV("%s: E", __FUNCTION__);
+
+    bool found = false;
+    StreamList::iterator streamI;
+    for (streamI = mStreams.begin();
+         streamI != mStreams.end(); streamI++) {
+        if ((*streamI)->getId() == outputId) {
+            found = true;
+            break;
+        }
+    }
+    if (!found) {
+        ALOGE("%s: Camera %d: Output stream %d doesn't exist; can't create "
+                "reprocess stream from it!", __FUNCTION__, mId, outputId);
+        return BAD_VALUE;
+    }
+
+    sp<ReprocessStreamAdapter> stream = new ReprocessStreamAdapter(mDevice);
+
+    res = stream->connectToDevice((*streamI));
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to create reprocessing stream from "\
+                "stream %d: %s (%d)", __FUNCTION__, mId, outputId,
+                strerror(-res), res);
+        return res;
+    }
+
+    *id = stream->getId();
+
+    mReprocessStreams.push_back(stream);
+    return OK;
+}
+
+
 status_t Camera2Device::getStreamInfo(int id,
         uint32_t *width, uint32_t *height, uint32_t *format) {
     ALOGV("%s: E", __FUNCTION__);
@@ -277,6 +313,33 @@
     return OK;
 }
 
+status_t Camera2Device::deleteReprocessStream(int id) {
+    ALOGV("%s: E", __FUNCTION__);
+    bool found = false;
+    for (ReprocessStreamList::iterator streamI = mReprocessStreams.begin();
+         streamI != mReprocessStreams.end(); streamI++) {
+        if ((*streamI)->getId() == id) {
+            status_t res = (*streamI)->release();
+            if (res != OK) {
+                ALOGE("%s: Unable to release reprocess stream %d from "
+                        "HAL device: %s (%d)", __FUNCTION__, id,
+                        strerror(-res), res);
+                return res;
+            }
+            mReprocessStreams.erase(streamI);
+            found = true;
+            break;
+        }
+    }
+    if (!found) {
+        ALOGE("%s: Camera %d: Unable to find stream %d to delete",
+                __FUNCTION__, mId, id);
+        return BAD_VALUE;
+    }
+    return OK;
+}
+
+
 status_t Camera2Device::createDefaultRequest(int templateId,
         CameraMetadata *request) {
     status_t err;
@@ -405,6 +468,32 @@
     return res;
 }
 
+status_t Camera2Device::pushReprocessBuffer(int reprocessStreamId,
+        buffer_handle_t *buffer, wp<BufferReleasedListener> listener) {
+    ALOGV("%s: E", __FUNCTION__);
+    bool found = false;
+    status_t res = OK;
+    for (ReprocessStreamList::iterator streamI = mReprocessStreams.begin();
+         streamI != mReprocessStreams.end(); streamI++) {
+        if ((*streamI)->getId() == reprocessStreamId) {
+            res = (*streamI)->pushIntoStream(buffer, listener);
+            if (res != OK) {
+                ALOGE("%s: Unable to push buffer to reprocess stream %d: %s (%d)",
+                        __FUNCTION__, reprocessStreamId, strerror(-res), res);
+                return res;
+            }
+            found = true;
+            break;
+        }
+    }
+    if (!found) {
+        ALOGE("%s: Camera %d: Unable to find reprocess stream %d",
+                __FUNCTION__, mId, reprocessStreamId);
+        res = BAD_VALUE;
+    }
+    return res;
+}
+
 /**
  * Camera2Device::NotificationListener
  */
@@ -903,7 +992,7 @@
         }
 
         buffers[bufferIdx] = anwBuffers[bufferIdx]->handle;
-        ALOGV("%s: Buffer %p allocated", __FUNCTION__, (void*)(buffers[bufferIdx]));
+        ALOGV("%s: Buffer %p allocated", __FUNCTION__, (void*)buffers[bufferIdx]);
     }
 
     ALOGV("%s: Registering %d buffers with camera HAL", __FUNCTION__, mTotalBuffers);
@@ -1030,7 +1119,7 @@
             const_cast<StreamAdapter*>(static_cast<const StreamAdapter*>(w));
     stream->mFrameCount++;
     ALOGVV("Stream %d enqueue: Frame %d (%p) captured at %lld ns",
-            stream->mId, mFrameCount, (void*)(*buffer), timestamp);
+            stream->mId, stream->mFrameCount, (void*)(*buffer), timestamp);
     int state = stream->mState;
     if (state != ACTIVE) {
         ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
@@ -1094,5 +1183,198 @@
     return native_window_set_crop(a, &crop);
 }
 
+/**
+ * Camera2Device::ReprocessStreamAdapter
+ */
+
+#ifndef container_of
+#define container_of(ptr, type, member) \
+    (type *)((char*)(ptr) - offsetof(type, member))
+#endif
+
+Camera2Device::ReprocessStreamAdapter::ReprocessStreamAdapter(camera2_device_t *d):
+        mState(RELEASED),
+        mDevice(d),
+        mId(-1),
+        mWidth(0), mHeight(0), mFormat(0),
+        mActiveBuffers(0),
+        mFrameCount(0)
+{
+    camera2_stream_in_ops::acquire_buffer = acquire_buffer;
+    camera2_stream_in_ops::release_buffer = release_buffer;
+}
+
+Camera2Device::ReprocessStreamAdapter::~ReprocessStreamAdapter() {
+    if (mState != RELEASED) {
+        release();
+    }
+}
+
+status_t Camera2Device::ReprocessStreamAdapter::connectToDevice(
+        const sp<StreamAdapter> &outputStream) {
+    status_t res;
+    ALOGV("%s: E", __FUNCTION__);
+
+    if (mState != RELEASED) return INVALID_OPERATION;
+    if (outputStream == NULL) {
+        ALOGE("%s: Null base stream passed to reprocess stream adapter",
+                __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    mBaseStream = outputStream;
+    mWidth = outputStream->getWidth();
+    mHeight = outputStream->getHeight();
+    mFormat = outputStream->getFormat();
+
+    ALOGV("%s: New reprocess stream parameters %d x %d, format 0x%x",
+            __FUNCTION__, mWidth, mHeight, mFormat);
+
+    // Allocate device-side stream interface
+
+    uint32_t id;
+    res = mDevice->ops->allocate_reprocess_stream_from_stream(mDevice,
+            outputStream->getId(), getStreamOps(),
+            &id);
+    if (res != OK) {
+        ALOGE("%s: Device reprocess stream allocation failed: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    ALOGV("%s: Allocated reprocess stream id %d based on stream %d",
+            __FUNCTION__, id, outputStream->getId());
+
+    mId = id;
+
+    mState = ACTIVE;
+
+    return OK;
+}
+
+status_t Camera2Device::ReprocessStreamAdapter::release() {
+    status_t res;
+    ALOGV("%s: Releasing stream %d", __FUNCTION__, mId);
+    if (mState >= ACTIVE) {
+        res = mDevice->ops->release_reprocess_stream(mDevice, mId);
+        if (res != OK) {
+            ALOGE("%s: Unable to release stream %d",
+                    __FUNCTION__, mId);
+            return res;
+        }
+    }
+
+    List<QueueEntry>::iterator s;
+    for (s = mQueue.begin(); s != mQueue.end(); s++) {
+        sp<BufferReleasedListener> listener = s->releaseListener.promote();
+        if (listener != 0) listener->onBufferReleased(s->handle);
+    }
+    for (s = mInFlightQueue.begin(); s != mInFlightQueue.end(); s++) {
+        sp<BufferReleasedListener> listener = s->releaseListener.promote();
+        if (listener != 0) listener->onBufferReleased(s->handle);
+    }
+    mQueue.clear();
+    mInFlightQueue.clear();
+
+    mState = RELEASED;
+    return OK;
+}
+
+status_t Camera2Device::ReprocessStreamAdapter::pushIntoStream(
+    buffer_handle_t *handle, const wp<BufferReleasedListener> &releaseListener) {
+    // TODO: Some error checking here would be nice
+    ALOGV("%s: Pushing buffer %p to stream", __FUNCTION__, (void*)(*handle));
+
+    QueueEntry entry;
+    entry.handle = handle;
+    entry.releaseListener = releaseListener;
+    mQueue.push_back(entry);
+    return OK;
+}
+
+status_t Camera2Device::ReprocessStreamAdapter::dump(int fd,
+        const Vector<String16>& args) {
+    String8 result =
+            String8::format("      Reprocess stream %d: %d x %d, fmt 0x%x\n",
+                    mId, mWidth, mHeight, mFormat);
+    result.appendFormat("        acquired buffers: %d\n",
+            mActiveBuffers);
+    result.appendFormat("        frame count: %d\n",
+            mFrameCount);
+    write(fd, result.string(), result.size());
+    return OK;
+}
+
+const camera2_stream_in_ops *Camera2Device::ReprocessStreamAdapter::getStreamOps() {
+    return static_cast<camera2_stream_in_ops *>(this);
+}
+
+int Camera2Device::ReprocessStreamAdapter::acquire_buffer(
+        const camera2_stream_in_ops_t *w,
+        buffer_handle_t** buffer) {
+    int res;
+    ReprocessStreamAdapter* stream =
+            const_cast<ReprocessStreamAdapter*>(
+                static_cast<const ReprocessStreamAdapter*>(w));
+    if (stream->mState != ACTIVE) {
+        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, stream->mState);
+        return INVALID_OPERATION;
+    }
+
+    if (stream->mQueue.empty()) {
+        *buffer = NULL;
+        return OK;
+    }
+
+    QueueEntry &entry = *(stream->mQueue.begin());
+
+    *buffer = entry.handle;
+
+    stream->mInFlightQueue.push_back(entry);
+    stream->mQueue.erase(stream->mQueue.begin());
+
+    stream->mActiveBuffers++;
+
+    ALOGV("Stream %d acquire: Buffer %p acquired", stream->mId,
+            (void*)(**buffer));
+    return OK;
+}
+
+int Camera2Device::ReprocessStreamAdapter::release_buffer(
+        const camera2_stream_in_ops_t* w,
+        buffer_handle_t* buffer) {
+    ReprocessStreamAdapter *stream =
+            const_cast<ReprocessStreamAdapter*>(
+                static_cast<const ReprocessStreamAdapter*>(w));
+    stream->mFrameCount++;
+    ALOGV("Reprocess stream %d release: Frame %d (%p)",
+            stream->mId, stream->mFrameCount, (void*)*buffer);
+    int state = stream->mState;
+    if (state != ACTIVE) {
+        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
+        return INVALID_OPERATION;
+    }
+    stream->mActiveBuffers--;
+
+    List<QueueEntry>::iterator s;
+    for (s = stream->mInFlightQueue.begin(); s != stream->mInFlightQueue.end(); s++) {
+        if (s->handle == buffer) break;
+    }
+    if (s == stream->mInFlightQueue.end()) {
+        ALOGE("%s: Can't find buffer %p in in-flight list!", __FUNCTION__,
+                buffer);
+        return INVALID_OPERATION;
+    }
+
+    sp<BufferReleasedListener> listener = s->releaseListener.promote();
+    if (listener != 0) {
+        listener->onBufferReleased(s->handle);
+    } else {
+        ALOGE("%s: Can't free buffer - missing listener", __FUNCTION__);
+    }
+    stream->mInFlightQueue.erase(s);
+
+    return OK;
+}
 
 }; // namespace android
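
The adapter above relies on the C-callback trampoline idiom: ReprocessStreamAdapter inherits from the HAL's camera2_stream_in_ops struct, installs static member functions into its function-pointer slots, and each callback casts the ops pointer back to the C++ object. A minimal standalone sketch of the same idiom, with hypothetical stand-in names:

    #include <cstdio>

    // Stand-in for the HAL's C callback table (camera2_stream_in_ops above)
    struct stream_in_ops {
        int (*acquire_buffer)(const stream_in_ops *w);
    };

    class Adapter : public stream_in_ops {
      public:
        Adapter() : mCalls(0) {
            // Install the static trampoline into the C function-pointer slot
            stream_in_ops::acquire_buffer = acquire_buffer;
        }
        const stream_in_ops *getStreamOps() {
            return static_cast<stream_in_ops *>(this);
        }
      private:
        int mCalls;
        static int acquire_buffer(const stream_in_ops *w) {
            // The HAL hands back only the ops pointer; recover the adapter
            // the same way the real acquire_buffer/release_buffer do
            Adapter *self =
                    const_cast<Adapter *>(static_cast<const Adapter *>(w));
            return ++self->mCalls;
        }
    };

    int main() {
        Adapter adapter;
        const stream_in_ops *ops = adapter.getStreamOps();
        std::printf("calls: %d\n", ops->acquire_buffer(ops));  // prints 1
        return 0;
    }

The container_of macro defined above offers an alternative recovery path; this implementation uses static_cast because the C++ object derives directly from the ops struct.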
diff --git a/services/camera/libcameraservice/Camera2Device.h b/services/camera/libcameraservice/Camera2Device.h
index 64f4608..a327d8d 100644
--- a/services/camera/libcameraservice/Camera2Device.h
+++ b/services/camera/libcameraservice/Camera2Device.h
@@ -80,6 +80,12 @@
             int *id);
 
     /**
+     * Create an input reprocess stream that uses buffers from an existing
+     * output stream.
+     */
+    status_t createReprocessStreamFromStream(int outputId, int *id);
+
+    /**
      * Get information about a given stream.
      */
     status_t getStreamInfo(int id,
@@ -97,6 +103,12 @@
     status_t deleteStream(int id);
 
     /**
+     * Delete reprocess stream. Must not be called if there are requests in
+     * flight which reference that stream.
+     */
+    status_t deleteReprocessStream(int id);
+
+    /**
      * Create a metadata buffer with fields that the HAL device believes are
      * best for the given use case
      */
@@ -163,6 +175,21 @@
      */
     status_t triggerPrecaptureMetering(uint32_t id);
 
+    /**
+     * Abstract interface for clients that want to listen to reprocess buffer
+     * release events
+     */
+    struct BufferReleasedListener: public virtual RefBase {
+        virtual void onBufferReleased(buffer_handle_t *handle) = 0;
+    };
+
+    /**
+     * Push a buffer to be reprocessed into a reprocessing stream, and
+     * provide a listener to call once the buffer is returned by the HAL
+     */
+    status_t pushReprocessBuffer(int reprocessStreamId,
+            buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
+
   private:
 
     const int mId;
@@ -343,6 +370,86 @@
     typedef List<sp<StreamAdapter> > StreamList;
     StreamList mStreams;
 
+    /**
+     * Adapter from an existing output stream to camera2 device input stream
+     * ops. Also takes care of allocating/deallocating the stream in the
+     * device interface
+     */
+    class ReprocessStreamAdapter: public camera2_stream_in_ops, public virtual RefBase {
+      public:
+        ReprocessStreamAdapter(camera2_device_t *d);
+
+        ~ReprocessStreamAdapter();
+
+        /**
+         * Create a HAL device reprocess stream based on an existing output stream.
+         */
+        status_t connectToDevice(const sp<StreamAdapter> &outputStream);
+
+        status_t release();
+
+        /**
+         * Push buffer into stream for reprocessing. Takes ownership until it notifies
+         * that the buffer has been released
+         */
+        status_t pushIntoStream(buffer_handle_t *handle,
+                const wp<BufferReleasedListener> &releaseListener);
+
+        /**
+         * Get stream parameters.
+         * Only valid after a successful connectToDevice call.
+         */
+        int      getId() const     { return mId; }
+        uint32_t getWidth() const  { return mWidth; }
+        uint32_t getHeight() const { return mHeight; }
+        uint32_t getFormat() const { return mFormat; }
+
+        // Dump stream information
+        status_t dump(int fd, const Vector<String16>& args);
+
+      private:
+        enum {
+            ERROR = -1,
+            RELEASED = 0,
+            ACTIVE
+        } mState;
+
+        sp<ANativeWindow> mConsumerInterface;
+        wp<StreamAdapter> mBaseStream;
+
+        struct QueueEntry {
+            buffer_handle_t *handle;
+            wp<BufferReleasedListener> releaseListener;
+        };
+
+        List<QueueEntry> mQueue;
+
+        List<QueueEntry> mInFlightQueue;
+
+        camera2_device_t *mDevice;
+
+        uint32_t mId;
+        uint32_t mWidth;
+        uint32_t mHeight;
+        uint32_t mFormat;
+
+        /** Debugging information */
+        uint32_t mActiveBuffers;
+        uint32_t mFrameCount;
+        int64_t  mLastTimestamp;
+
+        const camera2_stream_in_ops *getStreamOps();
+
+        static int acquire_buffer(const camera2_stream_in_ops_t *w,
+                buffer_handle_t** buffer);
+
+        static int release_buffer(const camera2_stream_in_ops_t* w,
+                buffer_handle_t* buffer);
+
+    }; // class ReprocessStreamAdapter
+
+    typedef List<sp<ReprocessStreamAdapter> > ReprocessStreamList;
+    ReprocessStreamList mReprocessStreams;
+
     // Receives HAL notifications and routes them to the NotificationListener
     static void notificationCallback(int32_t msg_type,
             int32_t ext1,
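
Taken together, the new public methods support a simple round trip. A hypothetical caller-side sketch (names and flow are illustrative only; it assumes an initialized Camera2Device and an existing output stream):

    // Illustrative only -- not part of this change.
    class MyReleaseListener : public Camera2Device::BufferReleasedListener {
      public:
        virtual void onBufferReleased(buffer_handle_t *handle) {
            ALOGV("Reprocess buffer %p returned by the HAL", (void*)(*handle));
        }
    };

    status_t reprocessOnce(const sp<Camera2Device> &device, int outputStreamId,
            buffer_handle_t *buffer) {
        int reprocessStreamId;
        status_t res = device->createReprocessStreamFromStream(outputStreamId,
                &reprocessStreamId);
        if (res != OK) return res;

        // pushReprocessBuffer() stores only a weak pointer, so the caller
        // must hold a strong reference until onBufferReleased() fires
        sp<MyReleaseListener> listener = new MyReleaseListener();
        res = device->pushReprocessBuffer(reprocessStreamId, buffer, listener);
        if (res != OK) return res;

        // ... submit a reprocess request referencing reprocessStreamId and
        // wait for the release callback before tearing the stream down ...
        return device->deleteReprocessStream(reprocessStreamId);
    }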
diff --git a/services/camera/libcameraservice/camera2/BurstCapture.cpp b/services/camera/libcameraservice/camera2/BurstCapture.cpp
new file mode 100644
index 0000000..5020819
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/BurstCapture.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "BurstCapture"
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "BurstCapture.h"
+
+#include "JpegCompressor.h"
+#include "../Camera2Client.h"
+
+namespace android {
+namespace camera2 {
+
+BurstCapture::BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequencer):
+    mInputChanged(false),
+    mCaptureStreamId(NO_STREAM),
+    mClient(client),
+    mSequencer(sequencer)
+{
+}
+
+BurstCapture::~BurstCapture() {
+}
+
+status_t BurstCapture::start(Vector<CameraMetadata> &metadatas, int32_t firstCaptureId) {
+    ALOGE("Not completely implemented");
+    return INVALID_OPERATION;
+}
+
+void BurstCapture::onFrameAvailable() {
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock l(mInputMutex);
+    if (!mInputChanged) {
+        mInputChanged = true;
+        mInputSignal.signal();
+    }
+}
+
+bool BurstCapture::threadLoop() {
+    status_t res;
+    {
+        Mutex::Autolock l(mInputMutex);
+        while (!mInputChanged) {
+            res = mInputSignal.waitRelative(mInputMutex, kWaitDuration);
+            if (res == TIMED_OUT) return true;
+        }
+        mInputChanged = false;
+    }
+
+    do {
+        sp<Camera2Client> client = mClient.promote();
+        if (client == 0) return false;
+        ALOGV("%s: Calling processFrameAvailable()", __FUNCTION__);
+        res = processFrameAvailable(client);
+    } while (res == OK);
+
+    return true;
+}
+
+CpuConsumer::LockedBuffer* BurstCapture::jpegEncode(
+    CpuConsumer::LockedBuffer *imgBuffer,
+    int quality)
+{
+    ALOGV("%s", __FUNCTION__);
+
+    CpuConsumer::LockedBuffer *imgEncoded = new CpuConsumer::LockedBuffer;
+    uint8_t *data = new uint8_t[ANDROID_JPEG_MAX_SIZE];
+    imgEncoded->data = data;
+    imgEncoded->width = imgBuffer->width;
+    imgEncoded->height = imgBuffer->height;
+    imgEncoded->stride = imgBuffer->stride;
+
+    Vector<CpuConsumer::LockedBuffer*> buffers;
+    buffers.push_back(imgBuffer);
+    buffers.push_back(imgEncoded);
+
+    sp<JpegCompressor> jpeg = new JpegCompressor();
+    status_t res = jpeg->start(buffers, 1);
+
+    bool success = (res == OK) && jpeg->waitForDone(10 * 1e9); // 10 s timeout
+    if (success) {
+        return buffers[1];
+    } else {
+        ALOGE("%s: JPEG encode failed or timed out", __FUNCTION__);
+        delete[] data;    // free the output buffer on the failure path
+        delete imgEncoded;
+        return NULL;  // TODO: maybe change function return value to status_t
+    }
+}
+
+status_t BurstCapture::processFrameAvailable(sp<Camera2Client> &client) {
+    ALOGE("Not implemented");
+    return INVALID_OPERATION;
+}
+
+} // namespace camera2
+} // namespace android
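
onFrameAvailable() and threadLoop() above form a coalesced-signal consumer: producers set the flag at most once per wake-up, and the consumer waits with a timeout so the thread stays responsive to exit requests while idle. A self-contained C++11 sketch of the same pattern, with standard-library types substituted for the Android ones:

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>

    std::mutex gInputMutex;
    std::condition_variable gInputSignal;
    bool gInputChanged = false;

    // Producer side (compare onFrameAvailable): coalesce repeated signals
    void signalInput() {
        std::lock_guard<std::mutex> l(gInputMutex);
        if (!gInputChanged) {
            gInputChanged = true;
            gInputSignal.notify_one();
        }
    }

    // Consumer side (compare threadLoop): returning true means "run again"
    bool loopOnce() {
        {
            std::unique_lock<std::mutex> l(gInputMutex);
            if (!gInputSignal.wait_for(l, std::chrono::milliseconds(10),
                    [] { return gInputChanged; })) {
                return true;  // timed out; lets the owner stop the thread
            }
            gInputChanged = false;
        }
        std::printf("processing new input\n");
        return true;
    }

    int main() {
        signalInput();
        loopOnce();  // consumes the pending signal and processes once
        return 0;
    }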
diff --git a/services/camera/libcameraservice/camera2/BurstCapture.h b/services/camera/libcameraservice/camera2/BurstCapture.h
new file mode 100644
index 0000000..dfc45eb
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/BurstCapture.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
+#define ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
+
+#include "camera2/CameraMetadata.h"
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <gui/CpuConsumer.h>
+#include "Camera2Device.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class CaptureSequencer;
+
+class BurstCapture : public virtual Thread,
+                     public virtual CpuConsumer::FrameAvailableListener
+{
+public:
+    BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+    virtual ~BurstCapture();
+
+    virtual void onFrameAvailable();
+    virtual status_t start(Vector<CameraMetadata> &metadatas, int32_t firstCaptureId);
+
+protected:
+    Mutex mInputMutex;
+    bool mInputChanged;
+    Condition mInputSignal;
+    int mCaptureStreamId;
+    wp<Camera2Client> mClient;
+    wp<CaptureSequencer> mSequencer;
+
+    // Should only be accessed by processing thread
+    enum {
+        NO_STREAM = -1
+    };
+
+    CpuConsumer::LockedBuffer* jpegEncode(
+        CpuConsumer::LockedBuffer *imgBuffer,
+        int quality);
+
+    virtual status_t processFrameAvailable(sp<Camera2Client> &client);
+
+private:
+    virtual bool threadLoop();
+    static const nsecs_t kWaitDuration = 10000000; // 10 ms
+};
+
+} // namespace camera2
+} // namespace android
+
+#endif
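
BurstCapture is intentionally a thin base class: start() and processFrameAvailable() are stubs returning INVALID_OPERATION, and manageBurstCaptureStart() in CaptureSequencer.cpp below still instantiates the base class directly (see the "check for burst mode type" comment there). A hypothetical subclass sketch of the intended extension point:

    // Hypothetical subclass -- not part of this change.
    class LowLightBurstCapture : public BurstCapture {
      public:
        LowLightBurstCapture(wp<Camera2Client> client,
                wp<CaptureSequencer> sequencer) :
            BurstCapture(client, sequencer) {}

      protected:
        virtual status_t processFrameAvailable(sp<Camera2Client> &client) {
            // Drain frames from the capture stream, run the low-light
            // merge, then compress the result with the inherited
            // jpegEncode() helper
            return OK;
        }
    };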
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
index 854b890..ca917f2 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
@@ -39,6 +39,7 @@
 
 CallbackProcessor::~CallbackProcessor() {
     ALOGV("%s: Exit", __FUNCTION__);
+    deleteStream();
 }
 
 void CallbackProcessor::onFrameAvailable() {
@@ -126,6 +127,11 @@
         sp<Camera2Device> device = client->getCameraDevice();
 
         device->deleteStream(mCallbackStreamId);
+
+        mCallbackHeap.clear();
+        mCallbackWindow.clear();
+        mCallbackConsumer.clear();
+
         mCallbackStreamId = NO_STREAM;
     }
     return OK;
@@ -136,7 +142,7 @@
     return mCallbackStreamId;
 }
 
-void CallbackProcessor::dump(int fd, const Vector<String16>& args) {
+void CallbackProcessor::dump(int fd, const Vector<String16>& args) const {
 }
 
 bool CallbackProcessor::threadLoop() {
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.h b/services/camera/libcameraservice/camera2/CallbackProcessor.h
index 36c51a3..c2a1372 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.h
@@ -48,7 +48,7 @@
     status_t deleteStream();
     int getStreamId() const;
 
-    void dump(int fd, const Vector<String16>& args);
+    void dump(int fd, const Vector<String16>& args) const;
   private:
     static const nsecs_t kWaitDuration = 10000000; // 10 ms
     wp<Camera2Client> mClient;
diff --git a/services/camera/libcameraservice/camera2/CameraMetadata.cpp b/services/camera/libcameraservice/camera2/CameraMetadata.cpp
index 95377b2..8399e20 100644
--- a/services/camera/libcameraservice/camera2/CameraMetadata.cpp
+++ b/services/camera/libcameraservice/camera2/CameraMetadata.cpp
@@ -84,6 +84,10 @@
             get_camera_metadata_entry_count(mBuffer);
 }
 
+bool CameraMetadata::isEmpty() const {
+    return entryCount() == 0;
+}
+
 status_t CameraMetadata::sort() {
     return sort_camera_metadata(mBuffer);
 }
diff --git a/services/camera/libcameraservice/camera2/CameraMetadata.h b/services/camera/libcameraservice/camera2/CameraMetadata.h
index 340414e..aee6cd7 100644
--- a/services/camera/libcameraservice/camera2/CameraMetadata.h
+++ b/services/camera/libcameraservice/camera2/CameraMetadata.h
@@ -87,6 +87,11 @@
     size_t entryCount() const;
 
     /**
+     * Is the buffer empty (no entries)
+     */
+    bool isEmpty() const;
+
+    /**
      * Sort metadata buffer for faster find
      */
     status_t sort();
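
isEmpty() pairs with acquire(): acquiring transfers the underlying metadata buffer and leaves the source without entries, which is how FrameProcessor (later in this change) detects that a listener took ownership of a frame. A short illustrative fragment:

    // Illustrative fragment (hypothetical helper)
    void takeOwnership(CameraMetadata &source) {
        CameraMetadata sink;
        sink.acquire(source);     // sink takes over the metadata buffer
        if (source.isEmpty()) {
            // Expected after acquire(): the source holds no entries, so
            // caching it (as mLastFrame does) would store an empty frame
        }
    }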
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
new file mode 100644
index 0000000..2f8b7db
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
@@ -0,0 +1,595 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2Client::CaptureSequencer"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <utils/Vector.h>
+
+#include "CaptureSequencer.h"
+#include "BurstCapture.h"
+#include "../Camera2Device.h"
+#include "../Camera2Client.h"
+#include "Parameters.h"
+
+namespace android {
+namespace camera2 {
+
+/** Public members */
+
+CaptureSequencer::CaptureSequencer(wp<Camera2Client> client):
+        Thread(false),
+        mStartCapture(false),
+        mBusy(false),
+        mNewAEState(false),
+        mNewFrameReceived(false),
+        mNewCaptureReceived(false),
+        mClient(client),
+        mCaptureState(IDLE),
+        mTriggerId(0),
+        mTimeoutCount(0),
+        mCaptureId(Camera2Client::kFirstCaptureRequestId) {
+    ALOGV("%s", __FUNCTION__);
+}
+
+CaptureSequencer::~CaptureSequencer() {
+    ALOGV("%s: Exit", __FUNCTION__);
+}
+
+void CaptureSequencer::setZslProcessor(wp<ZslProcessor> processor) {
+    Mutex::Autolock l(mInputMutex);
+    mZslProcessor = processor;
+}
+
+status_t CaptureSequencer::startCapture() {
+    ALOGV("%s", __FUNCTION__);
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    if (mBusy) {
+        ALOGE("%s: Already busy capturing!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    if (!mStartCapture) {
+        mStartCapture = true;
+        mStartCaptureSignal.signal();
+    }
+    return OK;
+}
+
+void CaptureSequencer::notifyAutoExposure(uint8_t newState, int triggerId) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    mAEState = newState;
+    mAETriggerId = triggerId;
+    if (!mNewAEState) {
+        mNewAEState = true;
+        mNewNotifySignal.signal();
+    }
+}
+
+void CaptureSequencer::onFrameAvailable(int32_t frameId,
+        CameraMetadata &frame) {
+    ALOGV("%s: Listener found new frame", __FUNCTION__);
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    mNewFrameId = frameId;
+    mNewFrame.acquire(frame);
+    if (!mNewFrameReceived) {
+        mNewFrameReceived = true;
+        mNewFrameSignal.signal();
+    }
+}
+
+void CaptureSequencer::onCaptureAvailable(nsecs_t timestamp) {
+    ATRACE_CALL();
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock l(mInputMutex);
+    mCaptureTimestamp = timestamp;
+    if (!mNewCaptureReceived) {
+        mNewCaptureReceived = true;
+        mNewCaptureSignal.signal();
+    }
+}
+
+
+void CaptureSequencer::dump(int fd, const Vector<String16>& args) {
+    String8 result;
+    if (mCaptureRequest.entryCount() != 0) {
+        result = "    Capture request:\n";
+        write(fd, result.string(), result.size());
+        mCaptureRequest.dump(fd, 2, 6);
+    } else {
+        result = "    Capture request: undefined\n";
+        write(fd, result.string(), result.size());
+    }
+    result = String8::format("    Current capture state: %s\n",
+            kStateNames[mCaptureState]);
+    result.append("    Latest captured frame:\n");
+    write(fd, result.string(), result.size());
+    mNewFrame.dump(fd, 2, 6);
+}
+
+/** Private members */
+
+const char* CaptureSequencer::kStateNames[CaptureSequencer::NUM_CAPTURE_STATES+1] =
+{
+    "IDLE",
+    "START",
+    "ZSL_START",
+    "ZSL_WAITING",
+    "ZSL_REPROCESSING",
+    "STANDARD_START",
+    "STANDARD_PRECAPTURE",
+    "STANDARD_CAPTURING",
+    "BURST_CAPTURE_START",
+    "BURST_CAPTURE_WAIT",
+    "DONE",
+    "ERROR",
+    "UNKNOWN"
+};
+
+const CaptureSequencer::StateManager
+        CaptureSequencer::kStateManagers[CaptureSequencer::NUM_CAPTURE_STATES-1] = {
+    &CaptureSequencer::manageIdle,
+    &CaptureSequencer::manageStart,
+    &CaptureSequencer::manageZslStart,
+    &CaptureSequencer::manageZslWaiting,
+    &CaptureSequencer::manageZslReprocessing,
+    &CaptureSequencer::manageStandardStart,
+    &CaptureSequencer::manageStandardPrecaptureWait,
+    &CaptureSequencer::manageStandardCapture,
+    &CaptureSequencer::manageStandardCaptureWait,
+    &CaptureSequencer::manageBurstCaptureStart,
+    &CaptureSequencer::manageBurstCaptureWait,
+    &CaptureSequencer::manageDone,
+};
+
+bool CaptureSequencer::threadLoop() {
+    status_t res;
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return false;
+
+    if (mCaptureState < ERROR) {
+        mCaptureState = (this->*kStateManagers[mCaptureState])(client);
+    } else {
+        ALOGE("%s: Bad capture state: %s",
+                __FUNCTION__, kStateNames[mCaptureState]);
+        return false;
+    }
+
+    return true;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageIdle(sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    while (!mStartCapture) {
+        res = mStartCaptureSignal.waitRelative(mInputMutex,
+                kWaitDuration);
+        if (res == TIMED_OUT) break;
+    }
+    if (mStartCapture) {
+        mStartCapture = false;
+        mBusy = true;
+        return START;
+    }
+    return IDLE;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageDone(sp<Camera2Client> &client) {
+    ATRACE_CALL();
+    mCaptureId++;
+
+    {
+        Mutex::Autolock l(mInputMutex);
+        mBusy = false;
+    }
+
+    SharedParameters::Lock l(client->getParameters());
+    switch (l.mParameters.state) {
+        case Parameters::STILL_CAPTURE:
+            l.mParameters.state = Parameters::STOPPED;
+            break;
+        case Parameters::VIDEO_SNAPSHOT:
+            l.mParameters.state = Parameters::RECORD;
+            break;
+        default:
+            ALOGE("%s: Camera %d: Still image produced unexpectedly "
+                    "in state %s!",
+                    __FUNCTION__, client->getCameraId(),
+                    Parameters::getStateName(l.mParameters.state));
+    }
+
+    return IDLE;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStart(
+        sp<Camera2Client> &client) {
+    ALOGV("%s", __FUNCTION__);
+    status_t res;
+    ATRACE_CALL();
+    SharedParameters::Lock l(client->getParameters());
+    CaptureState nextState = DONE;
+
+    res = updateCaptureRequest(l.mParameters, client);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Can't update still image capture request: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+
+    if (l.mParameters.lightFx != Parameters::LIGHTFX_NONE &&
+            l.mParameters.state == Parameters::STILL_CAPTURE) {
+        nextState = BURST_CAPTURE_START;
+    } else if (l.mParameters.zslMode &&
+            l.mParameters.state == Parameters::STILL_CAPTURE) {
+        nextState = ZSL_START;
+    } else {
+        nextState = STANDARD_START;
+    }
+
+    return nextState;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageZslStart(
+        sp<Camera2Client> &client) {
+    status_t res;
+    sp<ZslProcessor> processor = mZslProcessor.promote();
+    if (processor == 0) {
+        ALOGE("%s: No ZSL queue to use!", __FUNCTION__);
+        return DONE;
+    }
+
+    client->registerFrameListener(mCaptureId,
+            this);
+
+    res = client->getCameraDevice()->clearStreamingRequest();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
+                "%s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+    // TODO: Actually select the right thing here.
+    processor->pushToReprocess(mCaptureId);
+
+    mTimeoutCount = kMaxTimeoutsForCaptureEnd;
+    return STANDARD_CAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageZslWaiting(
+        sp<Camera2Client> &client) {
+    return DONE;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageZslReprocessing(
+        sp<Camera2Client> &client) {
+    return START;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart(
+        sp<Camera2Client> &client) {
+    ATRACE_CALL();
+    client->registerFrameListener(mCaptureId,
+            this);
+    {
+        SharedParameters::Lock l(client->getParameters());
+        mTriggerId = l.mParameters.precaptureTriggerCounter++;
+    }
+    client->getCameraDevice()->triggerPrecaptureMetering(mTriggerId);
+
+    mAeInPrecapture = false;
+    mTimeoutCount = kMaxTimeoutsForPrecaptureStart;
+    return STANDARD_PRECAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardPrecaptureWait(
+        sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    while (!mNewAEState) {
+        res = mNewNotifySignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            break;
+        }
+    }
+    if (mTimeoutCount <= 0) {
+        ALOGW("Timed out waiting for precapture %s",
+                mAeInPrecapture ? "end" : "start");
+        return STANDARD_CAPTURE;
+    }
+    if (mNewAEState) {
+        if (!mAeInPrecapture) {
+            // Waiting to see PRECAPTURE state
+            if (mAETriggerId == mTriggerId &&
+                    mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+                ALOGV("%s: Got precapture start", __FUNCTION__);
+                mAeInPrecapture = true;
+                mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
+            }
+        } else {
+            // Waiting to see PRECAPTURE state end
+            if (mAETriggerId == mTriggerId &&
+                    mAEState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+                ALOGV("%s: Got precapture end", __FUNCTION__);
+                return STANDARD_CAPTURE;
+            }
+        }
+        mNewAEState = false;
+    }
+    return STANDARD_PRECAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture(
+        sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
+    SharedParameters::Lock l(client->getParameters());
+    Vector<uint8_t> outputStreams;
+
+    outputStreams.push(client->getPreviewStreamId());
+    outputStreams.push(client->getCaptureStreamId());
+
+    if (l.mParameters.previewCallbackFlags &
+            CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) {
+        outputStreams.push(client->getCallbackStreamId());
+    }
+
+    if (l.mParameters.state == Parameters::VIDEO_SNAPSHOT) {
+        outputStreams.push(client->getRecordingStreamId());
+    }
+
+    res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+            outputStreams);
+    if (res == OK) {
+        res = mCaptureRequest.update(ANDROID_REQUEST_ID,
+                &mCaptureId, 1);
+    }
+    if (res == OK) {
+        res = mCaptureRequest.sort();
+    }
+
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set up still capture request: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+
+    CameraMetadata captureCopy = mCaptureRequest;
+    if (captureCopy.entryCount() == 0) {
+        ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
+                __FUNCTION__, client->getCameraId());
+        return DONE;
+    }
+
+    if (l.mParameters.state == Parameters::STILL_CAPTURE) {
+        res = client->getCameraDevice()->clearStreamingRequest();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to stop preview for still capture: "
+                    "%s (%d)",
+                    __FUNCTION__, client->getCameraId(), strerror(-res), res);
+            return DONE;
+        }
+    }
+    // TODO: Capture should be atomic with setStreamingRequest here
+    res = client->getCameraDevice()->capture(captureCopy);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to submit still image capture request: "
+                "%s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+
+    if (l.mParameters.playShutterSound) {
+        client->getCameraService()->playSound(CameraService::SOUND_SHUTTER);
+    }
+
+    mTimeoutCount = kMaxTimeoutsForCaptureEnd;
+    return STANDARD_CAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
+        sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    while (!mNewFrameReceived) {
+        res = mNewFrameSignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            break;
+        }
+    }
+    while (!mNewCaptureReceived) {
+        res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            break;
+        }
+    }
+    if (mTimeoutCount <= 0) {
+        ALOGW("Timed out waiting for capture to complete");
+        return DONE;
+    }
+    if (mNewFrameReceived && mNewCaptureReceived) {
+        if (mNewFrameId != mCaptureId) {
+            ALOGW("Mismatched capture frame IDs: Expected %d, got %d",
+                    mCaptureId, mNewFrameId);
+        }
+        camera_metadata_entry_t entry;
+        entry = mNewFrame.find(ANDROID_SENSOR_TIMESTAMP);
+        if (entry.count == 0) {
+            ALOGE("No timestamp field in capture frame!");
+        } else if (entry.data.i64[0] != mCaptureTimestamp) {
+            ALOGW("Mismatched capture timestamps: Metadata frame %lld,"
+                    " captured buffer %lld", entry.data.i64[0], mCaptureTimestamp);
+        }
+        client->removeFrameListener(mCaptureId);
+
+        mNewFrameReceived = false;
+        mNewCaptureReceived = false;
+        return DONE;
+    }
+    return STANDARD_CAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureStart(
+        sp<Camera2Client> &client) {
+    ALOGV("%s", __FUNCTION__);
+    status_t res;
+    ATRACE_CALL();
+
+    // check which burst mode is set, create respective burst object
+    {
+        SharedParameters::Lock l(client->getParameters());
+
+        res = updateCaptureRequest(l.mParameters, client);
+        if (res != OK) {
+            return DONE;
+        }
+
+        //
+        // check for burst mode type in mParameters here
+        //
+        mBurstCapture = new BurstCapture(client, this);
+    }
+
+    res = mCaptureRequest.update(ANDROID_REQUEST_ID, &mCaptureId, 1);
+    if (res == OK) {
+        res = mCaptureRequest.sort();
+    }
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set up still capture request: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+
+    CameraMetadata captureCopy = mCaptureRequest;
+    if (captureCopy.entryCount() == 0) {
+        ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
+                __FUNCTION__, client->getCameraId());
+        return DONE;
+    }
+
+    Vector<CameraMetadata> requests;
+    requests.push(mCaptureRequest);
+    res = mBurstCapture->start(requests, mCaptureId);
+    mTimeoutCount = kMaxTimeoutsForCaptureEnd * 10;
+    return BURST_CAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureWait(
+        sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+
+    while (!mNewCaptureReceived) {
+        res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            break;
+        }
+    }
+
+    if (mTimeoutCount <= 0) {
+        ALOGW("Timed out waiting for burst capture to complete");
+        return DONE;
+    }
+    if (mNewCaptureReceived) {
+        mNewCaptureReceived = false;
+        // TODO: update mCaptureId to last burst's capture ID + 1?
+        return DONE;
+    }
+
+    return BURST_CAPTURE_WAIT;
+}
+
+status_t CaptureSequencer::updateCaptureRequest(const Parameters &params,
+        sp<Camera2Client> &client) {
+    ATRACE_CALL();
+    status_t res;
+    if (mCaptureRequest.entryCount() == 0) {
+        res = client->getCameraDevice()->createDefaultRequest(
+                CAMERA2_TEMPLATE_STILL_CAPTURE,
+                &mCaptureRequest);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to create default still image request:"
+                    " %s (%d)", __FUNCTION__, client->getCameraId(),
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
+    res = params.updateRequest(&mCaptureRequest);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update common entries of capture "
+                "request: %s (%d)", __FUNCTION__, client->getCameraId(),
+                strerror(-res), res);
+        return res;
+    }
+
+    res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_SIZE,
+            params.jpegThumbSize, 2);
+    if (res != OK) return res;
+    res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_QUALITY,
+            &params.jpegThumbQuality, 1);
+    if (res != OK) return res;
+    res = mCaptureRequest.update(ANDROID_JPEG_QUALITY,
+            &params.jpegQuality, 1);
+    if (res != OK) return res;
+    res = mCaptureRequest.update(
+            ANDROID_JPEG_ORIENTATION,
+            &params.jpegRotation, 1);
+    if (res != OK) return res;
+
+    if (params.gpsEnabled) {
+        res = mCaptureRequest.update(
+                ANDROID_JPEG_GPS_COORDINATES,
+                params.gpsCoordinates, 3);
+        if (res != OK) return res;
+        res = mCaptureRequest.update(
+                ANDROID_JPEG_GPS_TIMESTAMP,
+                &params.gpsTimestamp, 1);
+        if (res != OK) return res;
+        res = mCaptureRequest.update(
+                ANDROID_JPEG_GPS_PROCESSING_METHOD,
+                params.gpsProcessingMethod);
+        if (res != OK) return res;
+    } else {
+        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_COORDINATES);
+        if (res != OK) return res;
+        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_TIMESTAMP);
+        if (res != OK) return res;
+        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_PROCESSING_METHOD);
+        if (res != OK) return res;
+    }
+
+    return OK;
+}
+
+
+}; // namespace camera2
+}; // namespace android
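
The sequencer's core is the table-driven dispatch in threadLoop(): one pointer-to-member handler per non-terminal state, indexed by mCaptureState, with DONE feeding back to IDLE. A standalone sketch of the idiom using a hypothetical three-state machine:

    #include <cstdio>

    class Machine {
      public:
        enum State { IDLE, RUNNING, DONE, NUM_STATES };
        Machine() : mState(IDLE) {}
        bool step() {
            // Compare threadLoop(): dispatch through the handler table
            // and stop once a terminal state is reached
            if (mState >= DONE) return false;
            mState = (this->*kHandlers[mState])();
            return true;
        }
      private:
        typedef State (Machine::*Handler)();
        static const Handler kHandlers[];
        State mState;
        State manageIdle()    { std::printf("idle\n");    return RUNNING; }
        State manageRunning() { std::printf("running\n"); return DONE; }
    };

    // One entry per non-terminal state, in enum order
    const Machine::Handler Machine::kHandlers[] = {
        &Machine::manageIdle,
        &Machine::manageRunning,
    };

    int main() {
        Machine m;
        while (m.step()) {}
        return 0;
    }

Unlike this sketch, the sequencer treats DONE as dispatchable (manageDone resets to IDLE) and only ERROR as terminal, which is why its table holds NUM_CAPTURE_STATES - 1 entries.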
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.h b/services/camera/libcameraservice/camera2/CaptureSequencer.h
new file mode 100644
index 0000000..39ae079
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/CaptureSequencer.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_CAPTURESEQUENCER_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_CAPTURESEQUENCER_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include "CameraMetadata.h"
+#include "Parameters.h"
+#include "FrameProcessor.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class ZslProcessor;
+class BurstCapture;
+
+/**
+ * Manages the still image capture process for
+ * zero-shutter-lag, regular, and video snapshots.
+ */
+class CaptureSequencer:
+            virtual public Thread,
+            virtual public FrameProcessor::FilteredListener {
+  public:
+    CaptureSequencer(wp<Camera2Client> client);
+    ~CaptureSequencer();
+
+    // Hand the sequencer a reference to the ZslProcessor, which holds the
+    // ZSL buffers and frames
+    void setZslProcessor(wp<ZslProcessor> processor);
+
+    // Begin still image capture
+    status_t startCapture();
+
+    // Notifications about AE state changes
+    void notifyAutoExposure(uint8_t newState, int triggerId);
+
+    // Notifications from the frame processor
+    virtual void onFrameAvailable(int32_t frameId, CameraMetadata &frame);
+
+    // Notifications from the capture processor
+    void onCaptureAvailable(nsecs_t timestamp);
+
+    void dump(int fd, const Vector<String16>& args);
+
+  private:
+    /**
+     * Accessed by other threads
+     */
+    Mutex mInputMutex;
+
+    bool mStartCapture;
+    bool mBusy;
+    Condition mStartCaptureSignal;
+
+    bool mNewAEState;
+    uint8_t mAEState;
+    int mAETriggerId;
+    Condition mNewNotifySignal;
+
+    bool mNewFrameReceived;
+    int32_t mNewFrameId;
+    CameraMetadata mNewFrame;
+    Condition mNewFrameSignal;
+
+    bool mNewCaptureReceived;
+    nsecs_t mCaptureTimestamp;
+    Condition mNewCaptureSignal;
+
+    /**
+     * Internal to CaptureSequencer
+     */
+    static const nsecs_t kWaitDuration = 100000000; // 100 ms
+    static const int kMaxTimeoutsForPrecaptureStart = 2; // 200 ms
+    static const int kMaxTimeoutsForPrecaptureEnd = 10;  // 1 sec
+    static const int kMaxTimeoutsForCaptureEnd    = 20;  // 2 sec
+
+    wp<Camera2Client> mClient;
+    wp<ZslProcessor> mZslProcessor;
+    sp<BurstCapture> mBurstCapture;
+
+    enum CaptureState {
+        IDLE,
+        START,
+        ZSL_START,
+        ZSL_WAITING,
+        ZSL_REPROCESSING,
+        STANDARD_START,
+        STANDARD_PRECAPTURE_WAIT,
+        STANDARD_CAPTURE,
+        STANDARD_CAPTURE_WAIT,
+        BURST_CAPTURE_START,
+        BURST_CAPTURE_WAIT,
+        DONE,
+        ERROR,
+        NUM_CAPTURE_STATES
+    } mCaptureState;
+    static const char* kStateNames[];
+
+    typedef CaptureState (CaptureSequencer::*StateManager)(sp<Camera2Client> &client);
+    static const StateManager kStateManagers[];
+
+    CameraMetadata mCaptureRequest;
+
+    int mTriggerId;
+    int mTimeoutCount;
+    bool mAeInPrecapture;
+
+    int32_t mCaptureId;
+
+    // Main internal methods
+
+    virtual bool threadLoop();
+
+    CaptureState manageIdle(sp<Camera2Client> &client);
+    CaptureState manageStart(sp<Camera2Client> &client);
+
+    CaptureState manageZslStart(sp<Camera2Client> &client);
+    CaptureState manageZslWaiting(sp<Camera2Client> &client);
+    CaptureState manageZslReprocessing(sp<Camera2Client> &client);
+
+    CaptureState manageStandardStart(sp<Camera2Client> &client);
+    CaptureState manageStandardPrecaptureWait(sp<Camera2Client> &client);
+    CaptureState manageStandardCapture(sp<Camera2Client> &client);
+    CaptureState manageStandardCaptureWait(sp<Camera2Client> &client);
+
+    CaptureState manageBurstCaptureStart(sp<Camera2Client> &client);
+    CaptureState manageBurstCaptureWait(sp<Camera2Client> &client);
+
+    CaptureState manageDone(sp<Camera2Client> &client);
+
+    // Utility methods
+
+    status_t updateCaptureRequest(const Parameters &params,
+            sp<Camera2Client> &client);
+};
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.cpp b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
index 5059754..e24db0b 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
@@ -36,6 +36,19 @@
     ALOGV("%s: Exit", __FUNCTION__);
 }
 
+status_t FrameProcessor::registerListener(int32_t id,
+        wp<FilteredListener> listener) {
+    Mutex::Autolock l(mInputMutex);
+    ALOGV("%s: Registering listener for frame id %d",
+            __FUNCTION__, id);
+    ssize_t res = mListeners.replaceValueFor(id, listener);
+    return res < 0 ? res : OK;
+}
+
+status_t FrameProcessor::removeListener(int32_t id) {
+    Mutex::Autolock l(mInputMutex);
+    ssize_t res = mListeners.removeItem(id);
+    return res < 0 ? res : OK;
+}
+
 void FrameProcessor::dump(int fd, const Vector<String16>& args) {
     String8 result("    Latest received frame:\n");
     write(fd, result.string(), result.size());
@@ -50,6 +63,7 @@
         sp<Camera2Client> client = mClient.promote();
         if (client == 0) return false;
         device = client->getCameraDevice();
+        if (device == 0) return false;
     }
 
     res = device->waitForNextFrame(kWaitDuration);
@@ -67,20 +81,28 @@
 
 void FrameProcessor::processNewFrames(sp<Camera2Client> &client) {
     status_t res;
+    ATRACE_CALL();
     CameraMetadata frame;
     while ( (res = client->getCameraDevice()->getNextFrame(&frame)) == OK) {
         camera_metadata_entry_t entry;
+
         entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
         if (entry.count == 0) {
-            ALOGE("%s: Camera %d: Error reading frame number: %s (%d)",
-                    __FUNCTION__, client->getCameraId(), strerror(-res), res);
+            ALOGE("%s: Camera %d: Error reading frame number",
+                    __FUNCTION__, client->getCameraId());
             break;
         }
 
         res = processFaceDetect(frame, client);
         if (res != OK) break;
 
-        mLastFrame.acquire(frame);
+        // Must be last - listener can take ownership of frame
+        res = processListener(frame, client);
+        if (res != OK) break;
+
+        if (!frame.isEmpty()) {
+            mLastFrame.acquire(frame);
+        }
     }
     if (res != NOT_ENOUGH_DATA) {
         ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
@@ -91,9 +113,43 @@
     return;
 }
 
-status_t FrameProcessor::processFaceDetect(
-    const CameraMetadata &frame, sp<Camera2Client> &client) {
+status_t FrameProcessor::processListener(CameraMetadata &frame,
+        sp<Camera2Client> &client) {
+    ATRACE_CALL();
+    camera_metadata_entry_t entry;
+
+    entry = frame.find(ANDROID_REQUEST_ID);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: Error reading frame id",
+                __FUNCTION__, client->getCameraId());
+        return BAD_VALUE;
+    }
+    int32_t frameId = entry.data.i32[0];
+    ALOGV("%s: Got frame with ID %d", __FUNCTION__, frameId);
+
+    sp<FilteredListener> listener;
+    {
+        Mutex::Autolock l(mInputMutex);
+        ssize_t listenerIndex = mListeners.indexOfKey(frameId);
+        if (listenerIndex != NAME_NOT_FOUND) {
+            listener = mListeners[listenerIndex].promote();
+            if (listener == 0) {
+                mListeners.removeItemsAt(listenerIndex, 1);
+            }
+        }
+    }
+
+    if (listener != 0) {
+        listener->onFrameAvailable(frameId, frame);
+    }
+    return OK;
+}
+
+status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
+        sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
     camera_metadata_ro_entry_t entry;
     bool enableFaceDetect;
     int maxFaces;
@@ -209,6 +265,5 @@
     return OK;
 }
 
-
 }; // namespace camera2
 }; // namespace android
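The listener plumbing added above keeps at most one weak listener per request ID, promotes it while holding the lock, prunes dead entries, and makes the callback only after the lock is dropped. A rough analogue of that pattern using std:: types in place of KeyedVector/wp<>/sp<> (Dispatcher and Listener are illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <memory>
    #include <mutex>

    struct Listener {
        virtual ~Listener() {}
        virtual void onFrameAvailable(int32_t id) = 0;
    };

    class Dispatcher {
      public:
        void registerListener(int32_t id, std::weak_ptr<Listener> l) {
            std::lock_guard<std::mutex> lock(mMutex);
            mListeners[id] = l;                  // replaces any existing listener
        }
        void dispatch(int32_t id) {
            std::shared_ptr<Listener> l;
            {
                std::lock_guard<std::mutex> lock(mMutex);
                std::map<int32_t, std::weak_ptr<Listener> >::iterator it =
                        mListeners.find(id);
                if (it != mListeners.end()) {
                    l = it->second.lock();           // promote weak -> strong
                    if (!l) mListeners.erase(it);    // listener died; prune it
                }
            }
            if (l) l->onFrameAvailable(id);          // call with the lock dropped
        }
      private:
        std::mutex mMutex;
        std::map<int32_t, std::weak_ptr<Listener> > mListeners;
    };

    struct PrintListener : public Listener {
        virtual void onFrameAvailable(int32_t id) { printf("frame %d\n", id); }
    };

    int main() {
        Dispatcher d;
        std::shared_ptr<Listener> l(new PrintListener());
        d.registerListener(100, l);
        d.dispatch(100);   // delivered
        l.reset();
        d.dispatch(100);   // listener gone; entry pruned, nothing delivered
        return 0;
    }

Calling out with the lock released is what lets a listener such as ZslProcessor re-enter the frame processor (for example to remove itself) without deadlocking.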
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.h b/services/camera/libcameraservice/camera2/FrameProcessor.h
index 2cdf7f0..25d489a 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.h
+++ b/services/camera/libcameraservice/camera2/FrameProcessor.h
@@ -20,6 +20,7 @@
 #include <utils/Thread.h>
 #include <utils/String16.h>
 #include <utils/Vector.h>
+#include <utils/KeyedVector.h>
 #include "CameraMetadata.h"
 
 namespace android {
@@ -36,6 +37,17 @@
     FrameProcessor(wp<Camera2Client> client);
     ~FrameProcessor();
 
+    struct FilteredListener: virtual public RefBase {
+        // Listener may take ownership of frame
+        virtual void onFrameAvailable(int32_t frameId, CameraMetadata &frame) = 0;
+    };
+
+    // Register a listener for a specific frame ID (android.request.id).
+    // De-registers any existing listener for that ID.
+    status_t registerListener(int32_t id, wp<FilteredListener> listener);
+
+    status_t removeListener(int32_t id);
+
     void dump(int fd, const Vector<String16>& args);
   private:
     static const nsecs_t kWaitDuration = 10000000; // 10 ms
@@ -43,10 +55,17 @@
 
     virtual bool threadLoop();
 
+    Mutex mInputMutex;
+    KeyedVector<int32_t, wp<FilteredListener> > mListeners;
+
     void processNewFrames(sp<Camera2Client> &client);
+
     status_t processFaceDetect(const CameraMetadata &frame,
             sp<Camera2Client> &client);
 
+    status_t processListener(CameraMetadata &frame,
+            sp<Camera2Client> &client);
+
     CameraMetadata mLastFrame;
 };
 
diff --git a/services/camera/libcameraservice/camera2/JpegCompressor.cpp b/services/camera/libcameraservice/camera2/JpegCompressor.cpp
new file mode 100644
index 0000000..55964b6
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/JpegCompressor.cpp
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "JpegCompressor"
+
+#include <utils/Log.h>
+#include <ui/GraphicBufferMapper.h>
+
+#include "JpegCompressor.h"
+
+namespace android {
+namespace camera2 {
+
+JpegCompressor::JpegCompressor():
+        Thread(false),
+        mIsBusy(false),
+        mCaptureTime(0) {
+}
+
+JpegCompressor::~JpegCompressor() {
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock lock(mMutex);
+}
+
+status_t JpegCompressor::start(Vector<CpuConsumer::LockedBuffer*> buffers,
+        nsecs_t captureTime) {
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock busyLock(mBusyMutex);
+
+    if (mIsBusy) {
+        ALOGE("%s: Already processing a buffer!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    mIsBusy = true;
+
+    mBuffers = buffers;
+    mCaptureTime = captureTime;
+
+    status_t res;
+    res = run("JpegCompressor");
+    if (res != OK) {
+        ALOGE("%s: Unable to start up compression thread: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        mIsBusy = false;
+        // TODO: Decide whether the LockedBuffers should be released here
+    }
+    return res;
+}
+
+status_t JpegCompressor::cancel() {
+    ALOGV("%s", __FUNCTION__);
+    requestExitAndWait();
+    return OK;
+}
+
+status_t JpegCompressor::readyToRun() {
+    ALOGV("%s", __FUNCTION__);
+    return OK;
+}
+
+bool JpegCompressor::threadLoop() {
+    ALOGV("%s", __FUNCTION__);
+
+    mAuxBuffer  = mBuffers[0];   // input
+    mJpegBuffer = mBuffers[1];   // output
+
+    // Set up error management
+    mJpegErrorInfo = NULL;
+    JpegError error;
+    error.parent = this;
+
+    mCInfo.err = jpeg_std_error(&error);
+    mCInfo.err->error_exit = jpegErrorHandler;
+
+    jpeg_create_compress(&mCInfo);
+    if (checkError("Error initializing compression")) return false;
+
+    // Route compressed data straight to output stream buffer
+    JpegDestination jpegDestMgr;
+    jpegDestMgr.parent = this;
+    jpegDestMgr.init_destination = jpegInitDestination;
+    jpegDestMgr.empty_output_buffer = jpegEmptyOutputBuffer;
+    jpegDestMgr.term_destination = jpegTermDestination;
+
+    mCInfo.dest = &jpegDestMgr;
+
+    // Set up compression parameters
+    mCInfo.image_width = mAuxBuffer->width;
+    mCInfo.image_height = mAuxBuffer->height;
+    mCInfo.input_components = 1;           // TODO: 3 once RGB input is supported
+    mCInfo.in_color_space = JCS_GRAYSCALE; // TODO: JCS_RGB for RGB input
+
+    ALOGV("%s: image_width = %d, image_height = %d",
+            __FUNCTION__, mCInfo.image_width, mCInfo.image_height);
+
+    jpeg_set_defaults(&mCInfo);
+    if (checkError("Error configuring defaults")) return false;
+
+    // Do compression
+    jpeg_start_compress(&mCInfo, TRUE);
+    if (checkError("Error starting compression")) return false;
+
+    size_t rowStride = mAuxBuffer->stride; // * 3 once RGB input is supported
+    const size_t kChunkSize = 32;
+    while (mCInfo.next_scanline < mCInfo.image_height) {
+        JSAMPROW chunk[kChunkSize];
+        // Clamp the chunk so the last pass can't index past the buffer
+        size_t chunkRows = mCInfo.image_height - mCInfo.next_scanline;
+        if (chunkRows > kChunkSize) chunkRows = kChunkSize;
+        for (size_t i = 0; i < chunkRows; i++) {
+            chunk[i] = (JSAMPROW)
+                    (mAuxBuffer->data + (i + mCInfo.next_scanline) * rowStride);
+        }
+        jpeg_write_scanlines(&mCInfo, chunk, chunkRows);
+        if (checkError("Error while compressing")) return false;
+        if (exitPending()) {
+            ALOGV("%s: Cancel called, exiting early", __FUNCTION__);
+            cleanUp();
+            return false;
+        }
+    }
+
+    jpeg_finish_compress(&mCInfo);
+    if (checkError("Error while finishing compression")) return false;
+
+    cleanUp();
+    return false;
+}
+
+bool JpegCompressor::isBusy() {
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock busyLock(mBusyMutex);
+    return mIsBusy;
+}
+
+// old function -- TODO: update for new buffer type
+bool JpegCompressor::isStreamInUse(uint32_t id) {
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock lock(mBusyMutex);
+
+    if (mBuffers.size() && mIsBusy) {
+        for (size_t i = 0; i < mBuffers.size(); i++) {
+//            if ( mBuffers[i].streamId == (int)id ) return true;
+        }
+    }
+    return false;
+}
+
+bool JpegCompressor::waitForDone(nsecs_t timeout) {
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock lock(mBusyMutex);
+    status_t res = OK;
+    while (mIsBusy && res == OK) {
+        // Loop to tolerate spurious wakeups
+        res = mDone.waitRelative(mBusyMutex, timeout);
+    }
+    return (res == OK);
+}
+
+bool JpegCompressor::checkError(const char *msg) {
+    ALOGV("%s", __FUNCTION__);
+    if (mJpegErrorInfo) {
+        char errBuffer[JMSG_LENGTH_MAX];
+        mJpegErrorInfo->err->format_message(mJpegErrorInfo, errBuffer);
+        ALOGE("%s: %s: %s",
+                __FUNCTION__, msg, errBuffer);
+        cleanUp();
+        mJpegErrorInfo = NULL;
+        return true;
+    }
+    return false;
+}
+
+void JpegCompressor::cleanUp() {
+    ALOGV("%s", __FUNCTION__);
+    jpeg_destroy_compress(&mCInfo);
+    Mutex::Autolock lock(mBusyMutex);
+    mIsBusy = false;
+    mDone.signal();
+}
+
+void JpegCompressor::jpegErrorHandler(j_common_ptr cinfo) {
+    ALOGV("%s", __FUNCTION__);
+    JpegError *error = static_cast<JpegError*>(cinfo->err);
+    error->parent->mJpegErrorInfo = cinfo;
+}
+
+void JpegCompressor::jpegInitDestination(j_compress_ptr cinfo) {
+    ALOGV("%s", __FUNCTION__);
+    JpegDestination *dest= static_cast<JpegDestination*>(cinfo->dest);
+    ALOGV("%s: Setting destination to %p, size %d",
+            __FUNCTION__, dest->parent->mJpegBuffer->data, kMaxJpegSize);
+    dest->next_output_byte = (JOCTET*)(dest->parent->mJpegBuffer->data);
+    dest->free_in_buffer = kMaxJpegSize;
+}
+
+boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr cinfo) {
+    ALOGV("%s", __FUNCTION__);
+    ALOGE("%s: JPEG destination buffer overflow!",
+            __FUNCTION__);
+    return true;
+}
+
+void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) {
+    ALOGV("%s", __FUNCTION__);
+    ALOGV("%s: Done writing JPEG data. %d bytes left in buffer",
+            __FUNCTION__, cinfo->dest->free_in_buffer);
+}
+
+}; // namespace camera2
+}; // namespace android
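For reference, the custom destination manager in the new JpegCompressor is standard libjpeg usage: compressed bytes land directly in a caller-supplied buffer rather than a stdio stream. The sketch below shows the same hookup under stated assumptions (8-bit grayscale input, tightly packed rows, a fixed 300000-byte output cap mirroring kMaxJpegSize); compressGray and the global buffer names are made up.

    #include <cstdio>   // jpeglib.h needs FILE declared first
    #include <cstddef>
    extern "C" {
    #include <jpeglib.h>
    }

    static unsigned char gOut[300000];   // assumed cap, like kMaxJpegSize
    static size_t gOutUsed = 0;

    static void initDest(j_compress_ptr cinfo) {
        cinfo->dest->next_output_byte = gOut;
        cinfo->dest->free_in_buffer = sizeof(gOut);
    }
    static boolean emptyOutput(j_compress_ptr /*cinfo*/) {
        return TRUE;   // buffer overflow; the real code logs an error here
    }
    static void termDest(j_compress_ptr cinfo) {
        gOutUsed = sizeof(gOut) - cinfo->dest->free_in_buffer;
    }

    // Compress a tightly packed 8-bit grayscale image; returns the JPEG size.
    size_t compressGray(const unsigned char *pixels, int width, int height) {
        jpeg_compress_struct cinfo;
        jpeg_error_mgr jerr;
        cinfo.err = jpeg_std_error(&jerr);
        jpeg_create_compress(&cinfo);

        jpeg_destination_mgr dest;
        dest.init_destination = initDest;
        dest.empty_output_buffer = emptyOutput;
        dest.term_destination = termDest;
        cinfo.dest = &dest;

        cinfo.image_width = width;
        cinfo.image_height = height;
        cinfo.input_components = 1;
        cinfo.in_color_space = JCS_GRAYSCALE;
        jpeg_set_defaults(&cinfo);

        jpeg_start_compress(&cinfo, TRUE);
        while (cinfo.next_scanline < cinfo.image_height) {
            // One row per call sidesteps chunk clamping entirely
            JSAMPROW row = const_cast<JSAMPROW>(
                    pixels + cinfo.next_scanline * width);
            jpeg_write_scanlines(&cinfo, &row, 1);
        }
        jpeg_finish_compress(&cinfo);
        jpeg_destroy_compress(&cinfo);
        return gOutUsed;
    }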
diff --git a/services/camera/libcameraservice/camera2/JpegCompressor.h b/services/camera/libcameraservice/camera2/JpegCompressor.h
new file mode 100644
index 0000000..945b1de
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/JpegCompressor.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ * This class simulates a hardware JPEG compressor.  It receives image buffers
+ * in RGBA_8888 format, processes them in a worker thread, and then pushes them
+ * out to their destination stream.  The current implementation only compresses
+ * grayscale input; RGB support is still a TODO.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_JPEGCOMPRESSOR_H
+#define ANDROID_SERVERS_CAMERA_JPEGCOMPRESSOR_H
+
+#include "utils/Thread.h"
+#include "utils/Mutex.h"
+#include "utils/Timers.h"
+#include "utils/Vector.h"
+//#include "Base.h"
+#include <stdio.h>
+#include <gui/CpuConsumer.h>
+
+extern "C" {
+#include <jpeglib.h>
+}
+
+
+namespace android {
+namespace camera2 {
+
+class JpegCompressor: private Thread, public virtual RefBase {
+  public:
+
+    JpegCompressor();
+    ~JpegCompressor();
+
+    // Start compressing COMPRESSED-format buffers; JpegCompressor takes
+    // ownership of the buffers vector.
+    status_t start(Vector<CpuConsumer::LockedBuffer*> buffers,
+            nsecs_t captureTime);
+
+    status_t cancel();
+
+    bool isBusy();
+    bool isStreamInUse(uint32_t id);
+
+    bool waitForDone(nsecs_t timeout);
+
+    // TODO: Measure this
+    static const size_t kMaxJpegSize = 300000;
+
+  private:
+    Mutex mBusyMutex;
+    Mutex mMutex;
+    bool mIsBusy;
+    Condition mDone;
+    nsecs_t mCaptureTime;
+
+    Vector<CpuConsumer::LockedBuffer*> mBuffers;
+    CpuConsumer::LockedBuffer *mJpegBuffer;
+    CpuConsumer::LockedBuffer *mAuxBuffer;
+    bool mFoundJpeg, mFoundAux;
+
+    jpeg_compress_struct mCInfo;
+
+    struct JpegError : public jpeg_error_mgr {
+        JpegCompressor *parent;
+    };
+    j_common_ptr mJpegErrorInfo;
+
+    struct JpegDestination : public jpeg_destination_mgr {
+        JpegCompressor *parent;
+    };
+
+    static void jpegErrorHandler(j_common_ptr cinfo);
+
+    static void jpegInitDestination(j_compress_ptr cinfo);
+    static boolean jpegEmptyOutputBuffer(j_compress_ptr cinfo);
+    static void jpegTermDestination(j_compress_ptr cinfo);
+
+    bool checkError(const char *msg);
+    void cleanUp();
+
+    /**
+     * Inherited Thread virtual overrides
+     */
+  private:
+    virtual status_t readyToRun();
+    virtual bool threadLoop();
+};
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
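The mIsBusy/mDone pairing above is a plain busy-flag-plus-condition-variable handshake. A portable C++11 sketch of the same start()/waitForDone() contract, with all names illustrative:

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    class Worker {
      public:
        Worker() : mBusy(false) {}
        bool start() {
            std::lock_guard<std::mutex> lock(mMutex);
            if (mBusy) return false;               // already processing a buffer
            mBusy = true;
            std::thread(&Worker::work, this).detach();
            return true;
        }
        bool waitForDone(std::chrono::milliseconds timeout) {
            std::unique_lock<std::mutex> lock(mMutex);
            // The predicate form loops over spurious wakeups for us
            return mDone.wait_for(lock, timeout, [this] { return !mBusy; });
        }
      private:
        void work() {
            // ... compression would happen here ...
            std::lock_guard<std::mutex> lock(mMutex);
            mBusy = false;
            mDone.notify_all();
        }
        std::mutex mMutex;
        std::condition_variable mDone;
        bool mBusy;
    };

    int main() {
        Worker w;
        w.start();
        printf("done: %d\n", (int)w.waitForDone(std::chrono::milliseconds(100)));
        return 0;
    }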
diff --git a/services/camera/libcameraservice/camera2/CaptureProcessor.cpp b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
similarity index 89%
rename from services/camera/libcameraservice/camera2/CaptureProcessor.cpp
rename to services/camera/libcameraservice/camera2/JpegProcessor.cpp
index b17f9d2..b230d2d 100644
--- a/services/camera/libcameraservice/camera2/CaptureProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
@@ -14,14 +14,14 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "Camera2Client::CaptureProcessor"
+#define LOG_TAG "Camera2Client::JpegProcessor"
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
-#include "CaptureProcessor.h"
+#include "JpegProcessor.h"
 #include <gui/SurfaceTextureClient.h>
 #include "../Camera2Device.h"
 #include "../Camera2Client.h"
@@ -30,18 +30,22 @@
 namespace android {
 namespace camera2 {
 
-CaptureProcessor::CaptureProcessor(wp<Camera2Client> client):
+JpegProcessor::JpegProcessor(
+    wp<Camera2Client> client,
+    wp<CaptureSequencer> sequencer):
         Thread(false),
         mClient(client),
+        mSequencer(sequencer),
         mCaptureAvailable(false),
         mCaptureStreamId(NO_STREAM) {
 }
 
-CaptureProcessor::~CaptureProcessor() {
+JpegProcessor::~JpegProcessor() {
     ALOGV("%s: Exit", __FUNCTION__);
+    deleteStream();
 }
 
-void CaptureProcessor::onFrameAvailable() {
+void JpegProcessor::onFrameAvailable() {
     Mutex::Autolock l(mInputMutex);
     if (!mCaptureAvailable) {
         mCaptureAvailable = true;
@@ -49,7 +53,7 @@
     }
 }
 
-status_t CaptureProcessor::updateStream(const Parameters &params) {
+status_t JpegProcessor::updateStream(const Parameters &params) {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
     status_t res;
@@ -127,7 +131,7 @@
     return OK;
 }
 
-status_t CaptureProcessor::deleteStream() {
+status_t JpegProcessor::deleteStream() {
     ATRACE_CALL();
     status_t res;
 
@@ -139,20 +143,25 @@
         sp<Camera2Device> device = client->getCameraDevice();
 
         device->deleteStream(mCaptureStreamId);
+
+        mCaptureHeap.clear();
+        mCaptureWindow.clear();
+        mCaptureConsumer.clear();
+
         mCaptureStreamId = NO_STREAM;
     }
     return OK;
 }
 
-int CaptureProcessor::getStreamId() const {
+int JpegProcessor::getStreamId() const {
     Mutex::Autolock l(mInputMutex);
     return mCaptureStreamId;
 }
 
-void CaptureProcessor::dump(int fd, const Vector<String16>& args) {
+void JpegProcessor::dump(int fd, const Vector<String16>& args) const {
 }
 
-bool CaptureProcessor::threadLoop() {
+bool JpegProcessor::threadLoop() {
     status_t res;
 
     {
@@ -174,7 +183,7 @@
     return true;
 }
 
-status_t CaptureProcessor::processNewCapture(sp<Camera2Client> &client) {
+status_t JpegProcessor::processNewCapture(sp<Camera2Client> &client) {
     ATRACE_CALL();
     status_t res;
     sp<Camera2Heap> captureHeap;
@@ -200,10 +209,7 @@
 
         switch (l.mParameters.state) {
             case Parameters::STILL_CAPTURE:
-                l.mParameters.state = Parameters::STOPPED;
-                break;
             case Parameters::VIDEO_SNAPSHOT:
-                l.mParameters.state = Parameters::RECORD;
                 break;
             default:
                 ALOGE("%s: Camera %d: Still image produced unexpectedly "
@@ -224,6 +230,11 @@
         return OK;
     }
 
+    sp<CaptureSequencer> sequencer = mSequencer.promote();
+    if (sequencer != 0) {
+        sequencer->onCaptureAvailable(imgBuffer.timestamp);
+    }
+
     // TODO: Optimize this to avoid memcopy
     void* captureMemory = mCaptureHeap->mHeap->getBase();
     size_t size = mCaptureHeap->mHeap->getSize();
diff --git a/services/camera/libcameraservice/camera2/CaptureProcessor.h b/services/camera/libcameraservice/camera2/JpegProcessor.h
similarity index 83%
rename from services/camera/libcameraservice/camera2/CaptureProcessor.h
rename to services/camera/libcameraservice/camera2/JpegProcessor.h
index 8e35739..6e7a860 100644
--- a/services/camera/libcameraservice/camera2/CaptureProcessor.h
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_CAPTUREPROCESSOR_H
-#define ANDROID_SERVERS_CAMERA_CAMERA2_CAPTUREPROCESSOR_H
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_JPEGPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_JPEGPROCESSOR_H
 
 #include <utils/Thread.h>
 #include <utils/String16.h>
@@ -33,14 +33,16 @@
 
 namespace camera2 {
 
+class CaptureSequencer;
+
 /***
  * Still image capture output image processing
  */
-class CaptureProcessor:
+class JpegProcessor:
             public Thread, public CpuConsumer::FrameAvailableListener {
   public:
-    CaptureProcessor(wp<Camera2Client> client);
-    ~CaptureProcessor();
+    JpegProcessor(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+    ~JpegProcessor();
 
     void onFrameAvailable();
 
@@ -48,10 +50,11 @@
     status_t deleteStream();
     int getStreamId() const;
 
-    void dump(int fd, const Vector<String16>& args);
+    void dump(int fd, const Vector<String16>& args) const;
   private:
     static const nsecs_t kWaitDuration = 10000000; // 10 ms
     wp<Camera2Client> mClient;
+    wp<CaptureSequencer> mSequencer;
 
     mutable Mutex mInputMutex;
     bool mCaptureAvailable;
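JpegProcessor now holds its CaptureSequencer only weakly and promotes it per capture, so neither object pins the other in memory during teardown. A rough std:: analogue of that notification path (Processor, Sequencer, and the timestamps are illustrative stand-ins for the wp<>/sp<> code in this patch):

    #include <cstdio>
    #include <memory>

    struct Sequencer {
        void onCaptureAvailable(long long timestampNs) {
            printf("capture at %lld ns\n", timestampNs);
        }
    };

    struct Processor {
        std::weak_ptr<Sequencer> mSequencer;
        void onNewCapture(long long timestampNs) {
            std::shared_ptr<Sequencer> s = mSequencer.lock();  // promote()
            if (s) {
                s->onCaptureAvailable(timestampNs);  // sequencer still alive
            }                                        // else: drop silently
        }
    };

    int main() {
        std::shared_ptr<Sequencer> seq(new Sequencer());
        Processor p;
        p.mSequencer = seq;
        p.onNewCapture(1000000000LL);   // delivered
        seq.reset();                    // sequencer torn down
        p.onNewCapture(2000000000LL);   // safely ignored
        return 0;
    }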
diff --git a/services/camera/libcameraservice/camera2/Parameters.cpp b/services/camera/libcameraservice/camera2/Parameters.cpp
index 2f7d023..e5942dc 100644
--- a/services/camera/libcameraservice/camera2/Parameters.cpp
+++ b/services/camera/libcameraservice/camera2/Parameters.cpp
@@ -18,6 +18,9 @@
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
 #include <math.h>
 #include <stdlib.h>
 
@@ -738,11 +741,17 @@
     enableFaceDetect = false;
 
     enableFocusMoveMessages = false;
-    afTriggerCounter = 0;
+    afTriggerCounter = 1;
     currentAfTriggerId = -1;
 
+    precaptureTriggerCounter = 1;
+
     previewCallbackFlags = 0;
 
+    zslMode = false;
+
+    lightFx = LIGHTFX_NONE;
+
     state = STOPPED;
 
     paramsFlattened = params.flatten();
@@ -1310,6 +1319,10 @@
         ALOGE("%s: Video stabilization not supported", __FUNCTION__);
     }
 
+    // LIGHTFX
+    validatedParams.lightFx = lightFxStringToEnum(
+        newParams.get(CameraParameters::KEY_LIGHTFX));
+
     /** Update internal parameters */
 
     validatedParams.paramsFlattened = params;
@@ -1318,6 +1331,209 @@
     return OK;
 }
 
+status_t Parameters::updateRequest(CameraMetadata *request) const {
+    ATRACE_CALL();
+    status_t res;
+
+    uint8_t metadataMode = ANDROID_REQUEST_METADATA_FULL;
+    res = request->update(ANDROID_REQUEST_METADATA_MODE,
+            &metadataMode, 1);
+    if (res != OK) return res;
+
+    res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+            previewFpsRange, 2);
+    if (res != OK) return res;
+
+    res = request->update(ANDROID_CONTROL_AWB_MODE,
+            &wbMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqWbLock = autoWhiteBalanceLock ?
+            ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
+    res = request->update(ANDROID_CONTROL_AWB_LOCK,
+            &reqWbLock, 1);
+    if (res != OK) return res;
+
+    res = request->update(ANDROID_CONTROL_EFFECT_MODE,
+            &effectMode, 1);
+    if (res != OK) return res;
+    res = request->update(ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+            &antibandingMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqControlMode =
+            (sceneMode == ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) ?
+            ANDROID_CONTROL_AUTO : ANDROID_CONTROL_USE_SCENE_MODE;
+    res = request->update(ANDROID_CONTROL_MODE,
+            &reqControlMode, 1);
+    if (res != OK) return res;
+    if (reqControlMode == ANDROID_CONTROL_USE_SCENE_MODE) {
+        res = request->update(ANDROID_CONTROL_SCENE_MODE,
+                &sceneMode, 1);
+        if (res != OK) return res;
+    }
+
+    uint8_t reqFlashMode = ANDROID_FLASH_OFF;
+    uint8_t reqAeMode;
+    switch (flashMode) {
+        case Parameters::FLASH_MODE_OFF:
+            reqAeMode = ANDROID_CONTROL_AE_ON; break;
+        case Parameters::FLASH_MODE_AUTO:
+            reqAeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH; break;
+        case Parameters::FLASH_MODE_ON:
+            reqAeMode = ANDROID_CONTROL_AE_ON_ALWAYS_FLASH; break;
+        case Parameters::FLASH_MODE_TORCH:
+            reqAeMode = ANDROID_CONTROL_AE_ON;
+            reqFlashMode = ANDROID_FLASH_TORCH;
+            break;
+        case Parameters::FLASH_MODE_RED_EYE:
+            reqAeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH_REDEYE; break;
+        default:
+            ALOGE("%s: Camera %d: Unknown flash mode %d", __FUNCTION__,
+                    cameraId, flashMode);
+            return BAD_VALUE;
+    }
+    res = request->update(ANDROID_FLASH_MODE,
+            &reqFlashMode, 1);
+    if (res != OK) return res;
+    res = request->update(ANDROID_CONTROL_AE_MODE,
+            &reqAeMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqAeLock = autoExposureLock ?
+            ANDROID_CONTROL_AE_LOCK_ON : ANDROID_CONTROL_AE_LOCK_OFF;
+    res = request->update(ANDROID_CONTROL_AE_LOCK,
+            &reqAeLock, 1);
+    if (res != OK) return res;
+
+    float reqFocusDistance = 0; // infinity focus in diopters
+    uint8_t reqFocusMode;
+    switch (focusMode) {
+        case Parameters::FOCUS_MODE_AUTO:
+        case Parameters::FOCUS_MODE_MACRO:
+        case Parameters::FOCUS_MODE_CONTINUOUS_VIDEO:
+        case Parameters::FOCUS_MODE_CONTINUOUS_PICTURE:
+        case Parameters::FOCUS_MODE_EDOF:
+            reqFocusMode = focusMode;
+            break;
+        case Parameters::FOCUS_MODE_INFINITY:
+        case Parameters::FOCUS_MODE_FIXED:
+            reqFocusMode = ANDROID_CONTROL_AF_OFF;
+            break;
+        default:
+            ALOGE("%s: Camera %d: Unknown focus mode %d", __FUNCTION__,
+                    cameraId, focusMode);
+            return BAD_VALUE;
+    }
+    res = request->update(ANDROID_LENS_FOCUS_DISTANCE,
+            &reqFocusDistance, 1);
+    if (res != OK) return res;
+    res = request->update(ANDROID_CONTROL_AF_MODE,
+            &reqFocusMode, 1);
+    if (res != OK) return res;
+
+    size_t reqFocusingAreasSize = focusingAreas.size() * 5;
+    int32_t *reqFocusingAreas = new int32_t[reqFocusingAreasSize];
+    for (size_t i = 0, j = 0; i < focusingAreas.size(); i++, j += 5) {
+        if (focusingAreas[i].weight != 0) {
+            reqFocusingAreas[j + 0] =
+                    normalizedXToArray(focusingAreas[i].left);
+            reqFocusingAreas[j + 1] =
+                    normalizedYToArray(focusingAreas[i].top);
+            reqFocusingAreas[j + 2] =
+                    normalizedXToArray(focusingAreas[i].right);
+            reqFocusingAreas[j + 3] =
+                    normalizedYToArray(focusingAreas[i].bottom);
+        } else {
+            reqFocusingAreas[j + 0] = 0;
+            reqFocusingAreas[j + 1] = 0;
+            reqFocusingAreas[j + 2] = 0;
+            reqFocusingAreas[j + 3] = 0;
+        }
+        reqFocusingAreas[j + 4] = focusingAreas[i].weight;
+    }
+    res = request->update(ANDROID_CONTROL_AF_REGIONS,
+            reqFocusingAreas, reqFocusingAreasSize);
+    delete[] reqFocusingAreas;
+    if (res != OK) return res;
+
+    res = request->update(ANDROID_CONTROL_AE_EXP_COMPENSATION,
+            &exposureCompensation, 1);
+    if (res != OK) return res;
+
+    size_t reqMeteringAreasSize = meteringAreas.size() * 5;
+    int32_t *reqMeteringAreas = new int32_t[reqMeteringAreasSize];
+    for (size_t i = 0, j = 0; i < meteringAreas.size(); i++, j += 5) {
+        if (meteringAreas[i].weight != 0) {
+            reqMeteringAreas[j + 0] =
+                normalizedXToArray(meteringAreas[i].left);
+            reqMeteringAreas[j + 1] =
+                normalizedYToArray(meteringAreas[i].top);
+            reqMeteringAreas[j + 2] =
+                normalizedXToArray(meteringAreas[i].right);
+            reqMeteringAreas[j + 3] =
+                normalizedYToArray(meteringAreas[i].bottom);
+        } else {
+            reqMeteringAreas[j + 0] = 0;
+            reqMeteringAreas[j + 1] = 0;
+            reqMeteringAreas[j + 2] = 0;
+            reqMeteringAreas[j + 3] = 0;
+        }
+        reqMeteringAreas[j + 4] = meteringAreas[i].weight;
+    }
+    res = request->update(ANDROID_CONTROL_AE_REGIONS,
+            reqMeteringAreas, reqMeteringAreasSize);
+    if (res != OK) {
+        delete[] reqMeteringAreas;
+        return res;
+    }
+
+    res = request->update(ANDROID_CONTROL_AWB_REGIONS,
+            reqMeteringAreas, reqMeteringAreasSize);
+    delete[] reqMeteringAreas;
+    if (res != OK) return res;
+
+    // Need to convert zoom index into a crop rectangle. The rectangle is
+    // chosen to maximize its area on the sensor
+
+    camera_metadata_ro_entry_t maxDigitalZoom =
+            staticInfo(ANDROID_SCALER_AVAILABLE_MAX_ZOOM);
+    float zoomIncrement = (maxDigitalZoom.data.f[0] - 1) /
+            (NUM_ZOOM_STEPS-1);
+    float zoomRatio = 1 + zoomIncrement * zoom;
+
+    float zoomLeft, zoomTop, zoomWidth, zoomHeight;
+    if (previewWidth >= previewHeight) {
+        zoomWidth =  fastInfo.arrayWidth / zoomRatio;
+        zoomHeight = zoomWidth *
+                previewHeight / previewWidth;
+    } else {
+        zoomHeight = fastInfo.arrayHeight / zoomRatio;
+        zoomWidth = zoomHeight *
+                previewWidth / previewHeight;
+    }
+    zoomLeft = (fastInfo.arrayWidth - zoomWidth) / 2;
+    zoomTop = (fastInfo.arrayHeight - zoomHeight) / 2;
+
+    int32_t reqCropRegion[3] = { (int32_t)zoomLeft, (int32_t)zoomTop,
+            (int32_t)zoomWidth };
+    res = request->update(ANDROID_SCALER_CROP_REGION,
+            reqCropRegion, 3);
+    if (res != OK) return res;
+
+    // TODO: Decide how to map recordingHint, or whether just to ignore it
+
+    uint8_t reqVstabMode = videoStabilization ?
+            ANDROID_CONTROL_VIDEO_STABILIZATION_ON :
+            ANDROID_CONTROL_VIDEO_STABILIZATION_OFF;
+    res = request->update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+            &reqVstabMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqFaceDetectMode = enableFaceDetect ?
+            fastInfo.bestFaceDetectMode :
+            (uint8_t)ANDROID_STATS_FACE_DETECTION_OFF;
+    res = request->update(ANDROID_STATS_FACE_DETECT_MODE,
+            &reqFaceDetectMode, 1);
+    if (res != OK) return res;
+
+    return OK;
+}
+
 const char* Parameters::getStateName(State state) {
 #define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
     switch(state) {
@@ -1518,6 +1734,16 @@
         Parameters::FOCUS_MODE_INVALID;
 }
 
+Parameters::lightFxMode_t Parameters::lightFxStringToEnum(
+        const char *lightFxMode) {
+    return
+        lightFxMode == NULL ?
+            Parameters::LIGHTFX_NONE :
+        !strcmp(lightFxMode, CameraParameters::LIGHTFX_LOWLIGHT) ?
+            Parameters::LIGHTFX_LOWLIGHT :
+        !strcmp(lightFxMode, CameraParameters::LIGHTFX_HDR) ?
+            Parameters::LIGHTFX_HDR :
+        Parameters::LIGHTFX_NONE;
+}
+
 status_t Parameters::parseAreas(const char *areasCStr,
         Vector<Parameters::Area> *areas) {
     static const size_t NUM_FIELDS = 5;
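A worked example of the zoom-index-to-crop-region arithmetic that updateRequest() performs, using made-up sensor numbers (2592x1944 active array, 4x max digital zoom, the new NUM_ZOOM_STEPS of 30, and a 16:9 preview):

    #include <cstdio>

    int main() {
        const float maxZoom = 4.0f;              // assumed max digital zoom
        const int   numSteps = 30;               // NUM_ZOOM_STEPS
        const int   arrayW = 2592, arrayH = 1944;
        const int   previewW = 1280, previewH = 720;
        const int   zoomIndex = 15;              // mid-range zoom setting

        float zoomIncrement = (maxZoom - 1) / (numSteps - 1);
        float zoomRatio = 1 + zoomIncrement * zoomIndex;   // ~2.55x

        // Landscape preview: fit width first, derive height from the
        // preview aspect ratio so the crop matches what the user sees.
        float w = arrayW / zoomRatio;
        float h = w * previewH / previewW;
        float left = (arrayW - w) / 2;           // center the crop
        float top  = (arrayH - h) / 2;

        printf("ratio %.2f, crop (%d,%d) %dx%d\n",
                zoomRatio, (int)left, (int)top, (int)w, (int)h);
        return 0;
    }

With these inputs the crop comes out to roughly a 1016x571 window centered on the array, which is what a ~2.55x digital zoom should select.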
diff --git a/services/camera/libcameraservice/camera2/Parameters.h b/services/camera/libcameraservice/camera2/Parameters.h
index 817d001..f768605 100644
--- a/services/camera/libcameraservice/camera2/Parameters.h
+++ b/services/camera/libcameraservice/camera2/Parameters.h
@@ -29,12 +29,17 @@
 namespace android {
 namespace camera2 {
 
-// Current camera state; this is the full state of the Camera under the old
-// camera API (contents of the CameraParameters object in a more-efficient
-// format, plus other state). The enum values are mostly based off the
-// corresponding camera2 enums, not the camera1 strings. A few are defined here
-// if they don't cleanly map to camera2 values.
+/**
+ * Current camera state; this is the full state of the Camera under the old
+ * camera API (contents of the CameraParameters object in a more-efficient
+ * format, plus other state). The enum values are mostly based off the
+ * corresponding camera2 enums, not the camera1 strings. A few are defined here
+ * if they don't cleanly map to camera2 values.
+ */
 struct Parameters {
+    /**
+     * Parameters and other state
+     */
     int cameraId;
     int cameraFacing;
 
@@ -104,6 +109,12 @@
     bool recordingHint;
     bool videoStabilization;
 
+    enum lightFxMode_t {
+        LIGHTFX_NONE = 0,
+        LIGHTFX_LOWLIGHT,
+        LIGHTFX_HDR
+    } lightFx;
+
     String8 paramsFlattened;
 
     // These parameters are also part of the camera API-visible state, but not
@@ -117,9 +128,13 @@
     int currentAfTriggerId;
     bool afInMotion;
 
+    int precaptureTriggerCounter;
+
     uint32_t previewCallbackFlags;
     bool previewCallbackOneShot;
 
+    bool zslMode;
+
     // Overall camera state
     enum State {
         DISCONNECTED,
@@ -132,7 +147,7 @@
     } state;
 
     // Number of zoom steps to simulate
-    static const unsigned int NUM_ZOOM_STEPS = 10;
+    static const unsigned int NUM_ZOOM_STEPS = 30;
 
     // Full static camera info, object owned by someone else, such as
     // Camera2Device.
@@ -149,7 +164,9 @@
         int32_t maxFaces;
     } fastInfo;
 
-    // Parameter manipulation and setup methods
+    /**
+     * Parameter manipulation and setup methods
+     */
 
     Parameters(int cameraId, int cameraFacing);
     ~Parameters();
@@ -170,6 +187,9 @@
     // Validate and update camera parameters based on new settings
     status_t set(const String8 &params);
 
+    // Update passed-in request for common parameters
+    status_t updateRequest(CameraMetadata *request) const;
+
     // Static methods for debugging and converting between camera1 and camera2
     // parameters
 
@@ -184,6 +204,7 @@
     static int sceneModeStringToEnum(const char *sceneMode);
     static flashMode_t flashModeStringToEnum(const char *flashMode);
     static focusMode_t focusModeStringToEnum(const char *focusMode);
+    static lightFxMode_t lightFxStringToEnum(const char *lightFxMode);
     static status_t parseAreas(const char *areasCStr,
             Vector<Area> *areas);
     static status_t validateAreas(const Vector<Area> &areas,
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.cpp b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
new file mode 100644
index 0000000..ac02afc
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2Client::ZslProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "ZslProcessor.h"
+#include <gui/SurfaceTextureClient.h>
+#include "../Camera2Device.h"
+#include "../Camera2Client.h"
+
+
+namespace android {
+namespace camera2 {
+
+ZslProcessor::ZslProcessor(
+    wp<Camera2Client> client,
+    wp<CaptureSequencer> sequencer):
+        Thread(false),
+        mState(RUNNING),
+        mClient(client),
+        mSequencer(sequencer),
+        mZslBufferAvailable(false),
+        mZslStreamId(NO_STREAM),
+        mZslReprocessStreamId(NO_STREAM),
+        mFrameListHead(0),
+        mZslQueueHead(0),
+        mZslQueueTail(0) {
+    mZslQueue.insertAt(0, kZslBufferDepth);
+    mFrameList.insertAt(0, kFrameListDepth);
+    sp<CaptureSequencer> captureSequencer = mSequencer.promote();
+    if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
+}
+
+ZslProcessor::~ZslProcessor() {
+    ALOGV("%s: Exit", __FUNCTION__);
+    deleteStream();
+}
+
+void ZslProcessor::onFrameAvailable() {
+    Mutex::Autolock l(mInputMutex);
+    if (!mZslBufferAvailable) {
+        mZslBufferAvailable = true;
+        mZslBufferAvailableSignal.signal();
+    }
+}
+
+void ZslProcessor::onFrameAvailable(int32_t frameId, CameraMetadata &frame) {
+    Mutex::Autolock l(mInputMutex);
+    camera_metadata_entry_t entry;
+    entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+    if (entry.count == 0) {
+        ALOGE("%s: No timestamp in frame!", __FUNCTION__);
+        return;
+    }
+    nsecs_t timestamp = entry.data.i64[0];
+    ALOGVV("Got preview frame for timestamp %lld", timestamp);
+
+    if (mState != RUNNING) return;
+
+    mFrameList.editItemAt(mFrameListHead).acquire(frame);
+    mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
+
+    findMatchesLocked();
+}
+
+void ZslProcessor::onBufferReleased(buffer_handle_t *handle) {
+    Mutex::Autolock l(mInputMutex);
+
+    buffer_handle_t *expectedHandle =
+            &(mZslQueue[mZslQueueTail].buffer.mGraphicBuffer->handle);
+
+    if (handle != expectedHandle) {
+        ALOGE("%s: Expected buffer %p, got buffer %p",
+                __FUNCTION__, expectedHandle, handle);
+    }
+
+    mState = RUNNING;
+}
+
+status_t ZslProcessor::updateStream(const Parameters &params) {
+    ATRACE_CALL();
+    ALOGV("%s: Configuring ZSL streams", __FUNCTION__);
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return OK;
+    sp<Camera2Device> device = client->getCameraDevice();
+
+    if (mZslConsumer == 0) {
+        // Create CPU buffer queue endpoint
+        mZslConsumer = new BufferItemConsumer(
+            GRALLOC_USAGE_HW_CAMERA_ZSL,
+            kZslBufferDepth,
+            true);
+        mZslConsumer->setFrameAvailableListener(this);
+        mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
+        mZslWindow = new SurfaceTextureClient(
+            mZslConsumer->getProducerInterface());
+    }
+
+    if (mZslStreamId != NO_STREAM) {
+        // Check if stream parameters have to change
+        uint32_t currentWidth, currentHeight;
+        res = device->getStreamInfo(mZslStreamId,
+                &currentWidth, &currentHeight, 0);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error querying capture output stream info: "
+                    "%s (%d)", __FUNCTION__,
+                    client->getCameraId(), strerror(-res), res);
+            return res;
+        }
+        if (currentWidth != (uint32_t)params.pictureWidth ||
+                currentHeight != (uint32_t)params.pictureHeight) {
+            res = device->deleteStream(mZslReprocessStreamId);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old reprocess stream "
+                        "for ZSL: %s (%d)", __FUNCTION__,
+                        client->getCameraId(), strerror(-res), res);
+                return res;
+            }
+            res = device->deleteStream(mZslStreamId);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old output stream "
+                        "for ZSL: %s (%d)", __FUNCTION__,
+                        client->getCameraId(), strerror(-res), res);
+                return res;
+            }
+            mZslStreamId = NO_STREAM;
+        }
+    }
+
+    if (mZslStreamId == NO_STREAM) {
+        // Create stream for HAL production
+        res = device->createStream(mZslWindow,
+                params.pictureWidth, params.pictureHeight,
+                HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 0,
+                &mZslStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't create output stream for ZSL: "
+                    "%s (%d)", __FUNCTION__, client->getCameraId(),
+                    strerror(-res), res);
+            return res;
+        }
+        res = device->createReprocessStreamFromStream(mZslStreamId,
+                &mZslReprocessStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't create reprocess stream for ZSL: "
+                    "%s (%d)", __FUNCTION__, client->getCameraId(),
+                    strerror(-res), res);
+            return res;
+        }
+    }
+    client->registerFrameListener(Camera2Client::kPreviewRequestId, this);
+
+    return OK;
+}
+
+status_t ZslProcessor::deleteStream() {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    if (mZslStreamId != NO_STREAM) {
+        sp<Camera2Client> client = mClient.promote();
+        if (client == 0) return OK;
+        sp<Camera2Device> device = client->getCameraDevice();
+
+        device->deleteStream(mZslReprocessStreamId);
+        mZslReprocessStreamId = NO_STREAM;
+        device->deleteStream(mZslStreamId);
+
+        mZslWindow.clear();
+        mZslConsumer.clear();
+
+        mZslStreamId = NO_STREAM;
+    }
+    return OK;
+}
+
+int ZslProcessor::getStreamId() const {
+    Mutex::Autolock l(mInputMutex);
+    return mZslStreamId;
+}
+
+int ZslProcessor::getReprocessStreamId() const {
+    Mutex::Autolock l(mInputMutex);
+    return mZslReprocessStreamId;
+}
+
+status_t ZslProcessor::pushToReprocess(int32_t requestId) {
+    ALOGV("%s: Send in reprocess request with id %d",
+            __FUNCTION__, requestId);
+    Mutex::Autolock l(mInputMutex);
+    status_t res;
+    sp<Camera2Client> client = mClient.promote();
+
+    if (client == 0) return INVALID_OPERATION;
+
+    if (mZslQueueTail != mZslQueueHead) {
+        buffer_handle_t *handle =
+            &(mZslQueue[mZslQueueTail].buffer.mGraphicBuffer->handle);
+        CameraMetadata request = mZslQueue[mZslQueueTail].frame;
+        uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
+        res = request.update(ANDROID_REQUEST_TYPE,
+                &requestType, 1);
+        uint8_t inputStreams[1] = { mZslReprocessStreamId };
+        if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS,
+                inputStreams, 1);
+        uint8_t outputStreams[1] = { client->getCaptureStreamId() };
+        if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+                outputStreams, 1);
+        res = request.update(ANDROID_REQUEST_ID,
+                &requestId, 1);
+
+        if (res != OK ) {
+            ALOGE("%s: Unable to update frame to a reprocess request", __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+
+        res = client->getCameraDevice()->pushReprocessBuffer(mZslReprocessStreamId,
+                handle, this);
+        if (res != OK) {
+            ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+
+        res = client->getCameraDevice()->capture(request);
+        if (res != OK ) {
+            ALOGE("%s: Unable to send ZSL reprocess request to capture: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+
+        mState = LOCKED;
+    } else {
+        ALOGE("%s: Nothing to push", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    return OK;
+}
+
+void ZslProcessor::dump(int fd, const Vector<String16>& args) const {
+}
+
+bool ZslProcessor::threadLoop() {
+    status_t res;
+
+    {
+        Mutex::Autolock l(mInputMutex);
+        while (!mZslBufferAvailable) {
+            res = mZslBufferAvailableSignal.waitRelative(mInputMutex,
+                    kWaitDuration);
+            if (res == TIMED_OUT) return true;
+        }
+        mZslBufferAvailable = false;
+    }
+
+    do {
+        sp<Camera2Client> client = mClient.promote();
+        if (client == 0) return false;
+        res = processNewZslBuffer(client);
+    } while (res == OK);
+
+    return true;
+}
+
+status_t ZslProcessor::processNewZslBuffer(sp<Camera2Client> &client) {
+    ATRACE_CALL();
+    status_t res;
+    Mutex::Autolock l(mInputMutex);
+
+    if (mState == LOCKED) {
+        BufferItemConsumer::BufferItem item;
+        res = mZslConsumer->acquireBuffer(&item);
+        if (res != OK) {
+            if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+                ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
+                        "%s (%d)", __FUNCTION__,
+                        client->getCameraId(), strerror(-res), res);
+            }
+            return res;
+        }
+        mZslConsumer->releaseBuffer(item);
+        return OK;
+    }
+
+    ALOGVV("Got ZSL buffer: head: %d, tail: %d", mZslQueueHead, mZslQueueTail);
+
+    if ( (mZslQueueHead + 1) % kZslBufferDepth == mZslQueueTail) {
+        mZslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer);
+        mZslQueue.replaceAt(mZslQueueTail);
+        mZslQueueTail = (mZslQueueTail + 1) % kZslBufferDepth;
+    }
+
+    ZslPair &queueHead = mZslQueue.editItemAt(mZslQueueHead);
+
+    res = mZslConsumer->acquireBuffer(&(queueHead.buffer));
+    if (res != OK) {
+        if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+            ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
+                    "%s (%d)", __FUNCTION__,
+                    client->getCameraId(), strerror(-res), res);
+        }
+        return res;
+    }
+    queueHead.frame.release();
+
+    mZslQueueHead = (mZslQueueHead + 1) % kZslBufferDepth;
+
+    ALOGVV("  Added buffer, timestamp %lld", queueHead.buffer.mTimestamp);
+
+    findMatchesLocked();
+
+    return OK;
+}
+
+void ZslProcessor::findMatchesLocked() {
+    for (size_t i = 0; i < mZslQueue.size(); i++) {
+        ZslPair &queueEntry = mZslQueue.editItemAt(i);
+        nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
+        if (queueEntry.frame.isEmpty() && bufferTimestamp != 0) {
+            // Have buffer, no matching frame. Look for one
+            for (size_t j = 0; j < mFrameList.size(); j++) {
+                bool match = false;
+                CameraMetadata &frame = mFrameList.editItemAt(j);
+                if (!frame.isEmpty()) {
+                    camera_metadata_entry_t entry;
+                    entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+                    if (entry.count == 0) {
+                        ALOGE("%s: Can't find timestamp in frame!",
+                                __FUNCTION__);
+                        continue;
+                    }
+                    nsecs_t frameTimestamp = entry.data.i64[0];
+                    if (bufferTimestamp == frameTimestamp) {
+                        ALOGVV("%s: Found match %lld", __FUNCTION__,
+                                frameTimestamp);
+                        match = true;
+                    } else {
+                        int64_t delta = bufferTimestamp - frameTimestamp;
+                        if (delta < 0) delta = -delta; // abs() would truncate to 32 bits
+                        if (delta < 1000000) {
+                            ALOGVV("%s: Found close match %lld (delta %lld)",
+                                    __FUNCTION__, bufferTimestamp, delta);
+                            match = true;
+                        }
+                    }
+                }
+                if (match) {
+                    queueEntry.frame.acquire(frame);
+                    break;
+                }
+            }
+        }
+    }
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.h b/services/camera/libcameraservice/camera2/ZslProcessor.h
new file mode 100644
index 0000000..74921a3
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/ZslProcessor.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include <gui/BufferItemConsumer.h>
+#include "Parameters.h"
+#include "FrameProcessor.h"
+#include "CameraMetadata.h"
+#include "Camera2Heap.h"
+#include "../Camera2Device.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class CaptureSequencer;
+
+/***
+ * ZSL queue processing
+ */
+class ZslProcessor:
+            virtual public Thread,
+            virtual public BufferItemConsumer::FrameAvailableListener,
+            virtual public FrameProcessor::FilteredListener,
+            virtual public Camera2Device::BufferReleasedListener {
+  public:
+    ZslProcessor(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+    ~ZslProcessor();
+
+    // From mZslConsumer
+    virtual void onFrameAvailable();
+    // From FrameProcessor
+    virtual void onFrameAvailable(int32_t frameId, CameraMetadata &frame);
+
+    virtual void onBufferReleased(buffer_handle_t *handle);
+
+    status_t updateStream(const Parameters &params);
+    status_t deleteStream();
+    int getStreamId() const;
+    int getReprocessStreamId() const;
+
+    status_t pushToReprocess(int32_t requestId);
+
+    void dump(int fd, const Vector<String16>& args) const;
+  private:
+    static const nsecs_t kWaitDuration = 10000000; // 10 ms
+
+    enum {
+        RUNNING,
+        LOCKED
+    } mState;
+
+    wp<Camera2Client> mClient;
+    wp<CaptureSequencer> mSequencer;
+
+    mutable Mutex mInputMutex;
+    bool mZslBufferAvailable;
+    Condition mZslBufferAvailableSignal;
+
+    enum {
+        NO_STREAM = -1
+    };
+
+    int mZslStreamId;
+    int mZslReprocessStreamId;
+    sp<BufferItemConsumer> mZslConsumer;
+    sp<ANativeWindow>      mZslWindow;
+
+    struct ZslPair {
+        BufferItemConsumer::BufferItem buffer;
+        CameraMetadata frame;
+    };
+
+    static const size_t kZslBufferDepth = 3;
+    static const size_t kFrameListDepth = kZslBufferDepth * 2;
+    Vector<CameraMetadata> mFrameList;
+    size_t mFrameListHead;
+
+    ZslPair mNextPair;
+
+    Vector<ZslPair> mZslQueue;
+    size_t mZslQueueHead;
+    size_t mZslQueueTail;
+
+    virtual bool threadLoop();
+
+    status_t processNewZslBuffer(sp<Camera2Client> &client);
+
+    // Match up entries from frame list to buffers in ZSL queue
+    void findMatchesLocked();
+};
+
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
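The ZSL queue declared above is a drop-oldest ring: mZslQueueHead and mZslQueueTail advance modulo kZslBufferDepth, and when the queue is full the oldest buffer is released back to the consumer before a new one is acquired. A toy version of that indexing scheme (kDepth and the ints stand in for the real BufferItems; one slot is left empty as a sentinel, as in the real queue):

    #include <cstdio>

    static const size_t kDepth = 3;   // kZslBufferDepth stand-in

    int main() {
        int queue[kDepth];
        size_t head = 0, tail = 0;

        for (int item = 1; item <= 5; item++) {
            if ((head + 1) % kDepth == tail) {
                // Full: release the oldest entry to make room
                printf("releasing oldest item %d\n", queue[tail]);
                tail = (tail + 1) % kDepth;
            }
            queue[head] = item;
            head = (head + 1) % kDepth;
        }
        // Drain what is left (capacity is kDepth - 1, so items 4 and 5)
        while (tail != head) {
            printf("pending item %d\n", queue[tail]);
            tail = (tail + 1) % kDepth;
        }
        return 0;
    }

With depth 3 the ring holds two live entries at a time, so pushing five items releases 1, 2, and 3 and leaves 4 and 5 pending, mirroring how the newest ZSL candidates are the ones kept for reprocessing.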