am 3a4ff0b8: am d74a9ee8: Merge "libstagefright: fix the duration "00:00" for some httplive link"

* commit '3a4ff0b8bf464c9c286d63326bea42609527051e':
  libstagefright: fix the duration "00:00" for some httplive link
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 34bae29..df25d7b 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -278,7 +278,7 @@
     void deferMessage(const sp<AMessage> &msg);
     void processDeferredMessages();
 
-    void sendFormatChange();
+    void sendFormatChange(const sp<AMessage> &reply);
 
     void signalError(
             OMX_ERRORTYPE error = OMX_ErrorUndefined,
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 54f8d9e..85232e7 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -82,7 +82,7 @@
         {0x0634f220, 0xddd4, 0x11db, 0xa0fc, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b }},
         {0x8631f300, 0x72e2, 0x11df, 0xb57e, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // uuid
         EFFECT_CONTROL_API_VERSION,
-        (EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_LAST | EFFECT_FLAG_DEVICE_IND
+        (EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_FIRST | EFFECT_FLAG_DEVICE_IND
         | EFFECT_FLAG_VOLUME_CTRL),
         BASS_BOOST_CUP_LOAD_ARM9E,
         BUNDLE_MEM_USAGE,
@@ -108,7 +108,7 @@
         {0x0bed4300, 0xddd6, 0x11db, 0x8f34, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // type
         {0xce772f20, 0x847d, 0x11df, 0xbb17, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // uuid Eq NXP
         EFFECT_CONTROL_API_VERSION,
-        (EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_LAST | EFFECT_FLAG_VOLUME_CTRL),
+        (EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_FIRST | EFFECT_FLAG_VOLUME_CTRL),
         EQUALIZER_CUP_LOAD_ARM9E,
         BUNDLE_MEM_USAGE,
         "Equalizer",
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 64e3885..cf41cf2 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2229,7 +2229,7 @@
     }
 }
 
-void ACodec::sendFormatChange() {
+void ACodec::sendFormatChange(const sp<AMessage> &reply) {
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatOutputFormatChanged);
 
@@ -2294,14 +2294,12 @@
                         rect.nTop + rect.nHeight - 1);
 
                 if (mNativeWindow != NULL) {
-                    android_native_rect_t crop;
-                    crop.left = rect.nLeft;
-                    crop.top = rect.nTop;
-                    crop.right = rect.nLeft + rect.nWidth;
-                    crop.bottom = rect.nTop + rect.nHeight;
-
-                    CHECK_EQ(0, native_window_set_crop(
-                                mNativeWindow.get(), &crop));
+                    reply->setRect(
+                            "crop",
+                            rect.nLeft,
+                            rect.nTop,
+                            rect.nLeft + rect.nWidth,
+                            rect.nTop + rect.nHeight);
                 }
             }
             break;
@@ -3057,8 +3055,11 @@
                 break;
             }
 
+            sp<AMessage> reply =
+                new AMessage(kWhatOutputBufferDrained, mCodec->id());
+
             if (!mCodec->mSentFormat) {
-                mCodec->sendFormatChange();
+                mCodec->sendFormatChange(reply);
             }
 
             info->mData->setRange(rangeOffset, rangeLength);
@@ -3081,9 +3082,6 @@
             notify->setBuffer("buffer", info->mData);
             notify->setInt32("flags", flags);
 
-            sp<AMessage> reply =
-                new AMessage(kWhatOutputBufferDrained, mCodec->id());
-
             reply->setPointer("buffer-id", info->mBufferID);
 
             notify->setMessage("reply", reply);
@@ -3127,6 +3125,13 @@
         mCodec->findBufferByID(kPortIndexOutput, bufferID, &index);
     CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_DOWNSTREAM);
 
+    android_native_rect_t crop;
+    if (msg->findRect("crop",
+            &crop.left, &crop.top, &crop.right, &crop.bottom)) {
+        CHECK_EQ(0, native_window_set_crop(
+                mCodec->mNativeWindow.get(), &crop));
+    }
+
     int32_t render;
     if (mCodec->mNativeWindow != NULL
             && msg->findInt32("render", &render) && render != 0
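
The ACodec hunks above defer the crop update: sendFormatChange() no longer calls native_window_set_crop() directly, but records the rectangle on the kWhatOutputBufferDrained reply message, and onOutputBufferDrained() applies it just before the buffer is handed back to the native window, so the crop changes together with the buffer it belongs to rather than ahead of it. Roughly, the round trip looks like this (a minimal sketch assuming the frameworks/av headers; the helper names are illustrative, not part of the patch):

    #include <media/stagefright/foundation/ADebug.h>    // CHECK_EQ
    #include <media/stagefright/foundation/AMessage.h>
    #include <system/window.h>                          // native_window_set_crop
    #include <utils/StrongPointer.h>

    using android::sp;
    using android::AMessage;

    // Producer side (sendFormatChange): stash the crop on the reply message
    // instead of touching the native window immediately.
    static void noteCrop(const sp<AMessage> &reply,
            int32_t left, int32_t top, int32_t width, int32_t height) {
        reply->setRect("crop", left, top, left + width, top + height);
    }

    // Consumer side (onOutputBufferDrained): apply the crop, if one was
    // recorded, right before queueing the buffer to the native window.
    static void applyCropIfAny(const sp<AMessage> &msg, ANativeWindow *window) {
        android_native_rect_t crop;
        if (msg->findRect("crop",
                &crop.left, &crop.top, &crop.right, &crop.bottom)) {
            CHECK_EQ(0, native_window_set_crop(window, &crop));
        }
    }
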
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index d66294c..942ea35 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -680,7 +680,7 @@
         return mStatus;
     }
     status_t status = NO_ERROR;
-    if (device && (mDescriptor.flags & EFFECT_FLAG_DEVICE_MASK) == EFFECT_FLAG_DEVICE_IND) {
+    if ((mDescriptor.flags & EFFECT_FLAG_DEVICE_MASK) == EFFECT_FLAG_DEVICE_IND) {
         status_t cmdStatus;
         uint32_t size = sizeof(status_t);
         uint32_t cmd = audio_is_output_devices(device) ? EFFECT_CMD_SET_DEVICE :
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 539bb4f..97f66f4 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1330,13 +1330,11 @@
         track->mResetDone = false;
         track->mPresentationCompleteFrames = 0;
         mActiveTracks.add(track);
-        if (track->mainBuffer() != mMixBuffer) {
-            sp<EffectChain> chain = getEffectChain_l(track->sessionId());
-            if (chain != 0) {
-                ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
-                        track->sessionId());
-                chain->incActiveTrackCnt();
-            }
+        sp<EffectChain> chain = getEffectChain_l(track->sessionId());
+        if (chain != 0) {
+            ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
+                    track->sessionId());
+            chain->incActiveTrackCnt();
         }
 
         status = NO_ERROR;
@@ -2992,9 +2990,11 @@
 
             // forward device change to effects that have requested to be
             // aware of attached audio device.
-            mOutDevice = value;
-            for (size_t i = 0; i < mEffectChains.size(); i++) {
-                mEffectChains[i]->setDevice_l(mOutDevice);
+            if (value != AUDIO_DEVICE_NONE) {
+                mOutDevice = value;
+                for (size_t i = 0; i < mEffectChains.size(); i++) {
+                    mEffectChains[i]->setDevice_l(mOutDevice);
+                }
             }
         }
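
Taken together with the Effects.cpp hunk above, the device check moves from the effect to the thread: the effect-side code drops its "device &&" test, and the playback thread now skips the forwarding loop entirely when the new routing value is AUDIO_DEVICE_NONE (which is 0, so the old test was the same check in implicit form). A minimal sketch of the guard as it now sits at the caller (illustrative helper name, not part of the patch):

    #include <system/audio.h>   // audio_devices_t, AUDIO_DEVICE_NONE

    // The thread filters out AUDIO_DEVICE_NONE before walking its effect
    // chains, so effects flagged EFFECT_FLAG_DEVICE_IND only ever see real
    // device changes.
    static bool shouldForwardDeviceToEffects(audio_devices_t value) {
        return value != AUDIO_DEVICE_NONE;
    }
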
 
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index dd50e3c..a1971e3 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -102,6 +102,9 @@
     String8 threadName;
 
     mStreamingProcessor = new StreamingProcessor(this);
+    threadName = String8::format("C2-%d-StreamProc",
+            mCameraId);
+    mStreamingProcessor->run(threadName.string());
 
     mFrameProcessor = new FrameProcessor(mDevice, this);
     threadName = String8::format("C2-%d-FrameProc",
@@ -411,6 +414,7 @@
     mCallbackProcessor->deleteStream();
     mZslProcessor->deleteStream();
 
+    mStreamingProcessor->requestExit();
     mFrameProcessor->requestExit();
     mCaptureSequencer->requestExit();
     mJpegProcessor->requestExit();
@@ -419,6 +423,7 @@
 
     ALOGV("Camera %d: Waiting for threads", mCameraId);
 
+    mStreamingProcessor->join();
     mFrameProcessor->join();
     mCaptureSequencer->join();
     mJpegProcessor->join();
@@ -677,6 +682,22 @@
         return res;
     }
 
+    // We could wait to create the JPEG output stream until first actual use
+    // (first takePicture call). However, this would substantially increase the
+    // first capture latency on HAL3 devices, and potentially on some HAL2
+    // devices. So create it unconditionally at preview start. As a drawback,
+    // this increases gralloc memory consumption for applications that don't
+    // ever take a picture.
+    // TODO: Find a better compromise, though this likely would involve HAL
+    // changes.
+    res = updateProcessorStream(mJpegProcessor, params);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Can't pre-configure still image "
+                "stream: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
     Vector<uint8_t> outputStreams;
     bool callbacksEnabled = params.previewCallbackFlags &
         CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
@@ -714,18 +735,6 @@
         res = mStreamingProcessor->startStream(StreamingProcessor::PREVIEW,
                 outputStreams);
     } else {
-        // With recording hint set, we're going to be operating under the
-        // assumption that the user will record video. To optimize recording
-        // startup time, create the necessary output streams for recording and
-        // video snapshot now if they don't already exist.
-        res = updateProcessorStream(mJpegProcessor, params);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Can't pre-configure still image "
-                    "stream: %s (%d)",
-                    __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-
         if (!restart) {
             res = mStreamingProcessor->updateRecordingRequest(params);
             if (res != OK) {
@@ -1007,8 +1016,12 @@
           * If the camera does not support auto-focus, it is a no-op and
           * onAutoFocus(boolean, Camera) callback will be called immediately
           * with a fake value of success set to true.
+          *
+          * Similarly, if focus mode is set to INFINITY, there's no reason to
+          * bother the HAL.
           */
-        if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED) {
+        if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED ||
+                l.mParameters.focusMode == Parameters::FOCUS_MODE_INFINITY) {
             notifyImmediately = true;
             notifySuccess = true;
         }
@@ -1068,6 +1081,11 @@
     int triggerId;
     {
         SharedParameters::Lock l(mParameters);
+        // Canceling does nothing in FIXED or INFINITY modes
+        if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED ||
+                l.mParameters.focusMode == Parameters::FOCUS_MODE_INFINITY) {
+            return OK;
+        }
         triggerId = ++l.mParameters.afTriggerCounter;
 
         // When using triggerAfWithAuto quirk, may need to reset focus mode to
diff --git a/services/camera/libcameraservice/Camera3Device.cpp b/services/camera/libcameraservice/Camera3Device.cpp
index 0b5e9c4..cc7802b 100644
--- a/services/camera/libcameraservice/Camera3Device.cpp
+++ b/services/camera/libcameraservice/Camera3Device.cpp
@@ -170,6 +170,7 @@
     mHal3Device = device;
     mStatus = STATUS_IDLE;
     mNextStreamId = 0;
+    mNeedConfig = true;
 
     return OK;
 }
@@ -180,24 +181,29 @@
 
     ALOGV("%s: E", __FUNCTION__);
 
-    status_t res;
-    if (mStatus == STATUS_UNINITIALIZED) return OK;
+    status_t res = OK;
+    if (mStatus == STATUS_UNINITIALIZED) return res;
 
     if (mStatus == STATUS_ACTIVE ||
             (mStatus == STATUS_ERROR && mRequestThread != NULL)) {
         res = mRequestThread->clearRepeatingRequests();
         if (res != OK) {
             SET_ERR_L("Can't stop streaming");
-            return res;
-        }
-        res = waitUntilDrainedLocked();
-        if (res != OK) {
-            SET_ERR_L("Timeout waiting for HAL to drain");
-            return res;
+            // Continue to close device even in case of error
+        } else {
+            res = waitUntilDrainedLocked();
+            if (res != OK) {
+                SET_ERR_L("Timeout waiting for HAL to drain");
+                // Continue to close device even in case of error
+            }
         }
     }
     assert(mStatus == STATUS_IDLE || mStatus == STATUS_ERROR);
 
+    if (mStatus == STATUS_ERROR) {
+        CLOGE("Shutting down in an error state");
+    }
+
     if (mRequestThread != NULL) {
         mRequestThread->requestExit();
     }
@@ -206,7 +212,12 @@
     mInputStream.clear();
 
     if (mRequestThread != NULL) {
-        mRequestThread->join();
+        if (mStatus != STATUS_ERROR) {
+            // HAL may be in a bad state, so waiting for request thread
+            // (which may be stuck in the HAL processCaptureRequest call)
+            // could be dangerous.
+            mRequestThread->join();
+        }
         mRequestThread.clear();
     }
 
@@ -218,7 +229,7 @@
     mStatus = STATUS_UNINITIALIZED;
 
     ALOGV("%s: X", __FUNCTION__);
-    return OK;
+    return res;
 }
 
 status_t Camera3Device::dump(int fd, const Vector<String16> &args) {
@@ -582,6 +593,7 @@
     }
 
     *id = mNextStreamId++;
+    mNeedConfig = true;
 
     // Continue captures if active at start
     if (wasActive) {
@@ -707,6 +719,7 @@
         // fall through since we want to still list the stream as deleted.
     }
     mDeletedStreams.add(deletedStream);
+    mNeedConfig = true;
 
     return res;
 }
@@ -1007,6 +1020,12 @@
         return INVALID_OPERATION;
     }
 
+    if (!mNeedConfig) {
+        ALOGV("%s: Skipping config, no stream changes", __FUNCTION__);
+        mStatus = STATUS_ACTIVE;
+        return OK;
+    }
+
     // Start configuring the streams
 
     camera3_stream_configuration config;
@@ -1091,6 +1110,7 @@
     // Finish configuring the streams lazily on first reference
 
     mStatus = STATUS_ACTIVE;
+    mNeedConfig = false;
 
     return OK;
 }
@@ -1125,7 +1145,7 @@
     ALOGE("Camera %d: %s", mId, errorCause.string());
 
     // But only do error state transition steps for the first error
-    if (mStatus == STATUS_ERROR) return;
+    if (mStatus == STATUS_ERROR || mStatus == STATUS_UNINITIALIZED) return;
 
     mErrorCause = errorCause;
 
diff --git a/services/camera/libcameraservice/Camera3Device.h b/services/camera/libcameraservice/Camera3Device.h
index 7a8c22a..faa42b9 100644
--- a/services/camera/libcameraservice/Camera3Device.h
+++ b/services/camera/libcameraservice/Camera3Device.h
@@ -149,6 +149,7 @@
     StreamSet                  mOutputStreams;
     sp<camera3::Camera3Stream> mInputStream;
     int                        mNextStreamId;
+    bool                       mNeedConfig;
 
     // Need to hold on to stream references until configure completes.
     Vector<sp<camera3::Camera3StreamInterface> > mDeletedStreams;
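
The Camera3Device hunks introduce mNeedConfig as a dirty flag for stream configuration: it is set at initialization and whenever a stream is created or deleted, checked at the top of the configure path so an unchanged stream set skips the HAL reconfiguration, and cleared once configuration succeeds. The bookkeeping reduces to something like this (a standalone sketch with illustrative names, not the camera code itself):

    // Dirty-flag pattern for lazy (re)configuration.
    class StreamConfigTracker {
    public:
        StreamConfigTracker() : mNeedConfig(true) {}

        void onStreamCreated()      { mNeedConfig = true; }
        void onStreamDeleted()      { mNeedConfig = true; }
        bool needsConfigure() const { return mNeedConfig; }
        void markConfigured()       { mNeedConfig = false; }  // after HAL success

    private:
        bool mNeedConfig;
    };

    // Usage, mirroring the configure path above:
    //   if (!tracker.needsConfigure()) return OK;   // skip redundant reconfig
    //   ... push the current stream set to the HAL ...
    //   tracker.markConfigured();
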
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
index 5fa84e0..98673ff 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
@@ -214,25 +214,33 @@
     status_t res;
 
     sp<Camera2Heap> callbackHeap;
-    size_t heapIdx;
-
-    CpuConsumer::LockedBuffer imgBuffer;
-    ALOGV("%s: Getting buffer", __FUNCTION__);
-    res = mCallbackConsumer->lockNextBuffer(&imgBuffer);
-    if (res != OK) {
-        if (res != BAD_VALUE) {
-            ALOGE("%s: Camera %d: Error receiving next callback buffer: "
-                    "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
-        }
-        return res;
-    }
-    ALOGV("%s: Camera %d: Preview callback available", __FUNCTION__,
-            mId);
-
     bool useFlexibleYuv = false;
     int32_t previewFormat = 0;
+    size_t heapIdx;
+
     {
+        /* acquire SharedParameters before mMutex so we don't deadlock
+            with Camera2Client code calling into StreamingProcessor */
         SharedParameters::Lock l(client->getParameters());
+        Mutex::Autolock m(mInputMutex);
+        CpuConsumer::LockedBuffer imgBuffer;
+        if (mCallbackStreamId == NO_STREAM) {
+            ALOGV("%s: Camera %d: No stream is available",
+                    __FUNCTION__, mId);
+            return INVALID_OPERATION;
+        }
+
+        ALOGV("%s: Getting buffer", __FUNCTION__);
+        res = mCallbackConsumer->lockNextBuffer(&imgBuffer);
+        if (res != OK) {
+            if (res != BAD_VALUE) {
+                ALOGE("%s: Camera %d: Error receiving next callback buffer: "
+                        "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+            }
+            return res;
+        }
+        ALOGV("%s: Camera %d: Preview callback available", __FUNCTION__,
+                mId);
 
         if ( l.mParameters.state != Parameters::PREVIEW
                 && l.mParameters.state != Parameters::RECORD
@@ -279,86 +287,89 @@
             ALOGV("%s: clearing oneshot", __FUNCTION__);
             l.mParameters.previewCallbackOneShot = false;
         }
-    }
 
-    uint32_t destYStride = 0;
-    uint32_t destCStride = 0;
-    if (useFlexibleYuv) {
-        if (previewFormat == HAL_PIXEL_FORMAT_YV12) {
-            // Strides must align to 16 for YV12
-            destYStride = ALIGN(imgBuffer.width, 16);
-            destCStride = ALIGN(destYStride / 2, 16);
+        uint32_t destYStride = 0;
+        uint32_t destCStride = 0;
+        if (useFlexibleYuv) {
+            if (previewFormat == HAL_PIXEL_FORMAT_YV12) {
+                // Strides must align to 16 for YV12
+                destYStride = ALIGN(imgBuffer.width, 16);
+                destCStride = ALIGN(destYStride / 2, 16);
+            } else {
+                // No padding for NV21
+                ALOG_ASSERT(previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP,
+                        "Unexpected preview format 0x%x", previewFormat);
+                destYStride = imgBuffer.width;
+                destCStride = destYStride / 2;
+            }
         } else {
-            // No padding for NV21
-            ALOG_ASSERT(previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP,
-                    "Unexpected preview format 0x%x", previewFormat);
-            destYStride = imgBuffer.width;
-            destCStride = destYStride / 2;
+            destYStride = imgBuffer.stride;
+            // don't care about cStride
         }
-    } else {
-        destYStride = imgBuffer.stride;
-        // don't care about cStride
-    }
 
-    size_t bufferSize = Camera2Client::calculateBufferSize(
-            imgBuffer.width, imgBuffer.height,
-            previewFormat, destYStride);
-    size_t currentBufferSize = (mCallbackHeap == 0) ?
-            0 : (mCallbackHeap->mHeap->getSize() / kCallbackHeapCount);
-    if (bufferSize != currentBufferSize) {
-        mCallbackHeap.clear();
-        mCallbackHeap = new Camera2Heap(bufferSize, kCallbackHeapCount,
-                "Camera2Client::CallbackHeap");
-        if (mCallbackHeap->mHeap->getSize() == 0) {
-            ALOGE("%s: Camera %d: Unable to allocate memory for callbacks",
+        size_t bufferSize = Camera2Client::calculateBufferSize(
+                imgBuffer.width, imgBuffer.height,
+                previewFormat, destYStride);
+        size_t currentBufferSize = (mCallbackHeap == 0) ?
+                0 : (mCallbackHeap->mHeap->getSize() / kCallbackHeapCount);
+        if (bufferSize != currentBufferSize) {
+            mCallbackHeap.clear();
+            mCallbackHeap = new Camera2Heap(bufferSize, kCallbackHeapCount,
+                    "Camera2Client::CallbackHeap");
+            if (mCallbackHeap->mHeap->getSize() == 0) {
+                ALOGE("%s: Camera %d: Unable to allocate memory for callbacks",
+                        __FUNCTION__, mId);
+                mCallbackConsumer->unlockBuffer(imgBuffer);
+                return INVALID_OPERATION;
+            }
+
+            mCallbackHeapHead = 0;
+            mCallbackHeapFree = kCallbackHeapCount;
+        }
+
+        if (mCallbackHeapFree == 0) {
+            ALOGE("%s: Camera %d: No free callback buffers, dropping frame",
                     __FUNCTION__, mId);
             mCallbackConsumer->unlockBuffer(imgBuffer);
-            return INVALID_OPERATION;
+            return OK;
         }
 
-        mCallbackHeapHead = 0;
-        mCallbackHeapFree = kCallbackHeapCount;
-    }
+        heapIdx = mCallbackHeapHead;
 
-    if (mCallbackHeapFree == 0) {
-        ALOGE("%s: Camera %d: No free callback buffers, dropping frame",
-                __FUNCTION__, mId);
+        mCallbackHeapHead = (mCallbackHeapHead + 1) & kCallbackHeapCount;
+        mCallbackHeapFree--;
+
+        // TODO: Get rid of this copy by passing the gralloc queue all the way
+        // to app
+
+        ssize_t offset;
+        size_t size;
+        sp<IMemoryHeap> heap =
+                mCallbackHeap->mBuffers[heapIdx]->getMemory(&offset,
+                        &size);
+        uint8_t *data = (uint8_t*)heap->getBase() + offset;
+
+        if (!useFlexibleYuv) {
+            // Can just memcpy when HAL format matches API format
+            memcpy(data, imgBuffer.data, bufferSize);
+        } else {
+            res = convertFromFlexibleYuv(previewFormat, data, imgBuffer,
+                    destYStride, destCStride);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Can't convert between 0x%x and 0x%x formats!",
+                        __FUNCTION__, mId, imgBuffer.format, previewFormat);
+                mCallbackConsumer->unlockBuffer(imgBuffer);
+                return BAD_VALUE;
+            }
+        }
+
+        ALOGV("%s: Freeing buffer", __FUNCTION__);
         mCallbackConsumer->unlockBuffer(imgBuffer);
-        return OK;
+
+        // mCallbackHeap may get freed up once input mutex is released
+        callbackHeap = mCallbackHeap;
     }
 
-    heapIdx = mCallbackHeapHead;
-
-    mCallbackHeapHead = (mCallbackHeapHead + 1) & kCallbackHeapCount;
-    mCallbackHeapFree--;
-
-    // TODO: Get rid of this copy by passing the gralloc queue all the way
-    // to app
-
-    ssize_t offset;
-    size_t size;
-    sp<IMemoryHeap> heap =
-            mCallbackHeap->mBuffers[heapIdx]->getMemory(&offset,
-                    &size);
-    uint8_t *data = (uint8_t*)heap->getBase() + offset;
-
-    if (!useFlexibleYuv) {
-        // Can just memcpy when HAL format matches API format
-        memcpy(data, imgBuffer.data, bufferSize);
-    } else {
-        res = convertFromFlexibleYuv(previewFormat, data, imgBuffer,
-                destYStride, destCStride);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Can't convert between 0x%x and 0x%x formats!",
-                    __FUNCTION__, mId, imgBuffer.format, previewFormat);
-            mCallbackConsumer->unlockBuffer(imgBuffer);
-            return BAD_VALUE;
-        }
-    }
-
-    ALOGV("%s: Freeing buffer", __FUNCTION__);
-    mCallbackConsumer->unlockBuffer(imgBuffer);
-
     // Call outside parameter lock to allow re-entrancy from notification
     {
         Camera2Client::SharedCameraCallbacks::Lock
@@ -367,7 +378,7 @@
             ALOGV("%s: Camera %d: Invoking client data callback",
                     __FUNCTION__, mId);
             l.mRemoteCallback->dataCallback(CAMERA_MSG_PREVIEW_FRAME,
-                    mCallbackHeap->mBuffers[heapIdx], NULL);
+                    callbackHeap->mBuffers[heapIdx], NULL);
         }
     }
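
The restructuring above is mostly about lock scope: the preview-callback path now takes the client's SharedParameters lock first and the processor's mInputMutex second, and keeps the CpuConsumer buffer locked only inside that scope, copying into callbackHeap so the heap remains usable after the mutex is released. The comment's point is a plain lock-ordering rule; in isolation it looks like this (a self-contained sketch in standard C++, not the camera code):

    #include <mutex>

    std::mutex parametersLock;   // stands in for SharedParameters
    std::mutex inputLock;        // stands in for mInputMutex

    // Every path that needs both locks takes them in the same order, so two
    // threads can never hold them in opposite order and deadlock each other.
    void callbackPath() {
        std::lock_guard<std::mutex> p(parametersLock);  // always first
        std::lock_guard<std::mutex> i(inputLock);       // always second
        // ... lock buffer, copy into the callback heap, unlock buffer ...
    }

    void clientPath() {
        std::lock_guard<std::mutex> p(parametersLock);  // same order
        std::lock_guard<std::mutex> i(inputLock);
        // ... update parameters that the callback path reads ...
    }
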
 
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
index 266e516..e5a011c 100644
--- a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
@@ -253,6 +253,12 @@
                 res = INVALID_OPERATION;
                 break;
             case Parameters::STILL_CAPTURE:
+                res = client->getCameraDevice()->waitUntilDrained();
+                if (res != OK) {
+                    ALOGE("%s: Camera %d: Can't idle after still capture: "
+                            "%s (%d)", __FUNCTION__, client->getCameraId(),
+                            strerror(-res), res);
+                }
                 l.mParameters.state = Parameters::STOPPED;
                 break;
             case Parameters::VIDEO_SNAPSHOT:
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
index f4a9217..fed05a6 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
@@ -41,6 +41,7 @@
         mPreviewStreamId(NO_STREAM),
         mRecordingRequestId(Camera2Client::kRecordingRequestIdStart),
         mRecordingStreamId(NO_STREAM),
+        mRecordingFrameAvailable(false),
         mRecordingHeapCount(kDefaultRecordingHeapCount)
 {
 }
@@ -536,6 +537,36 @@
 
 void StreamingProcessor::onFrameAvailable() {
     ATRACE_CALL();
+    Mutex::Autolock l(mMutex);
+    if (!mRecordingFrameAvailable) {
+        mRecordingFrameAvailable = true;
+        mRecordingFrameAvailableSignal.signal();
+    }
+
+}
+
+bool StreamingProcessor::threadLoop() {
+    status_t res;
+
+    {
+        Mutex::Autolock l(mMutex);
+        while (!mRecordingFrameAvailable) {
+            res = mRecordingFrameAvailableSignal.waitRelative(
+                mMutex, kWaitDuration);
+            if (res == TIMED_OUT) return true;
+        }
+        mRecordingFrameAvailable = false;
+    }
+
+    do {
+        res = processRecordingFrame();
+    } while (res == OK);
+
+    return true;
+}
+
+status_t StreamingProcessor::processRecordingFrame() {
+    ATRACE_CALL();
     status_t res;
     sp<Camera2Heap> recordingHeap;
     size_t heapIdx = 0;
@@ -547,12 +578,14 @@
         BufferItemConsumer::BufferItem imgBuffer;
         res = mRecordingConsumer->acquireBuffer(&imgBuffer);
         if (res != OK) {
-            ALOGE("%s: Camera %d: Error receiving recording buffer: %s (%d)",
-                    __FUNCTION__, mId, strerror(-res), res);
-            return;
+            if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+                ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
+                        __FUNCTION__, mId, strerror(-res), res);
+            }
+            return res;
         }
         mRecordingConsumer->releaseBuffer(imgBuffer);
-        return;
+        return OK;
     }
 
     {
@@ -563,23 +596,24 @@
         BufferItemConsumer::BufferItem imgBuffer;
         res = mRecordingConsumer->acquireBuffer(&imgBuffer);
         if (res != OK) {
-            ALOGE("%s: Camera %d: Error receiving recording buffer: %s (%d)",
-                    __FUNCTION__, mId, strerror(-res), res);
-            return;
+            if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+                ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
+                        __FUNCTION__, mId, strerror(-res), res);
+            }
+            return res;
         }
         timestamp = imgBuffer.mTimestamp;
 
         mRecordingFrameCount++;
         ALOGV("OnRecordingFrame: Frame %d", mRecordingFrameCount);
 
-        // TODO: Signal errors here upstream
         if (l.mParameters.state != Parameters::RECORD &&
                 l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
             ALOGV("%s: Camera %d: Discarding recording image buffers "
                     "received after recording done", __FUNCTION__,
                     mId);
             mRecordingConsumer->releaseBuffer(imgBuffer);
-            return;
+            return INVALID_OPERATION;
         }
 
         if (mRecordingHeap == 0) {
@@ -594,7 +628,7 @@
                 ALOGE("%s: Camera %d: Unable to allocate memory for recording",
                         __FUNCTION__, mId);
                 mRecordingConsumer->releaseBuffer(imgBuffer);
-                return;
+                return NO_MEMORY;
             }
             for (size_t i = 0; i < mRecordingBuffers.size(); i++) {
                 if (mRecordingBuffers[i].mBuf !=
@@ -615,7 +649,7 @@
             ALOGE("%s: Camera %d: No free recording buffers, dropping frame",
                     __FUNCTION__, mId);
             mRecordingConsumer->releaseBuffer(imgBuffer);
-            return;
+            return NO_MEMORY;
         }
 
         heapIdx = mRecordingHeapHead;
@@ -649,6 +683,7 @@
                 CAMERA_MSG_VIDEO_FRAME,
                 recordingHeap->mBuffers[heapIdx]);
     }
+    return OK;
 }
 
 void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) {
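
With this change, onFrameAvailable() no longer does the work on the buffer queue's thread: it only raises mRecordingFrameAvailable and signals, and StreamingProcessor's own threadLoop() (it now derives from Thread, per the header hunk below) wakes up, with a 50 ms timeout so exit requests are noticed, and drains every queued frame through processRecordingFrame() until it stops returning OK. The handoff pattern, stripped of the camera specifics (a self-contained sketch using std::thread primitives rather than the Android utils classes):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex              gLock;
    std::condition_variable gFrameSignal;
    bool                    gFrameAvailable = false;

    // Buffer-queue callback: cheap, never blocks on frame processing.
    void onFrameAvailable() {
        std::lock_guard<std::mutex> l(gLock);
        gFrameAvailable = true;
        gFrameSignal.notify_one();
    }

    // Stub standing in for processRecordingFrame(); returns false once the
    // consumer has no more buffers to hand out.
    bool processOneFrame() { return false; }

    // One pass of the worker loop; returning true means "run me again".
    bool threadLoopOnce() {
        {
            std::unique_lock<std::mutex> l(gLock);
            if (!gFrameSignal.wait_for(l, std::chrono::milliseconds(50),
                    [] { return gFrameAvailable; })) {
                return true;          // timed out: loop so exit requests are seen
            }
            gFrameAvailable = false;
        }
        while (processOneFrame()) { } // drain everything that piled up
        return true;
    }
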
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.h b/services/camera/libcameraservice/camera2/StreamingProcessor.h
index 281b344..9f71fa0 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.h
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.h
@@ -37,7 +37,8 @@
 /**
  * Management and processing for preview and recording streams
  */
-class StreamingProcessor: public BufferItemConsumer::FrameAvailableListener {
+class StreamingProcessor:
+            public Thread, public BufferItemConsumer::FrameAvailableListener {
   public:
     StreamingProcessor(sp<Camera2Client> client);
     ~StreamingProcessor();
@@ -103,6 +104,8 @@
     sp<ANativeWindow> mPreviewWindow;
 
     // Recording-related members
+    static const nsecs_t kWaitDuration = 50000000; // 50 ms
+
     int32_t mRecordingRequestId;
     int mRecordingStreamId;
     int mRecordingFrameCount;
@@ -111,11 +114,17 @@
     CameraMetadata mRecordingRequest;
     sp<camera2::Camera2Heap> mRecordingHeap;
 
+    bool mRecordingFrameAvailable;
+    Condition mRecordingFrameAvailableSignal;
+
     static const size_t kDefaultRecordingHeapCount = 8;
     size_t mRecordingHeapCount;
     Vector<BufferItemConsumer::BufferItem> mRecordingBuffers;
     size_t mRecordingHeapHead, mRecordingHeapFree;
 
+    virtual bool threadLoop();
+
+    status_t processRecordingFrame();
 };
 
 
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor3.cpp b/services/camera/libcameraservice/camera2/ZslProcessor3.cpp
index 2e06691..40c77df 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/camera2/ZslProcessor3.cpp
@@ -329,8 +329,9 @@
 }
 
 bool ZslProcessor3::threadLoop() {
-    // TODO: remove dependency on thread
-    return true;
+    // TODO: remove dependency on thread. For now, shut thread down right
+    // away.
+    return false;
 }
 
 void ZslProcessor3::dumpZslQueue(int fd) const {
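
Returning false here relies on the android::Thread contract: threadLoop() is invoked repeatedly for as long as it returns true (and exit has not been requested), and the thread terminates once it returns false, so the now-unneeded ZSL thread exits on its first iteration instead of spinning. A minimal sketch of that contract (illustrative class name, assuming <utils/Thread.h>):

    #include <utils/Thread.h>

    class OneShotThread : public android::Thread {
        virtual bool threadLoop() {
            // Do any one-time work here. Returning false tells Thread not to
            // call threadLoop() again, so the thread winds down immediately,
            // which is the effect ZslProcessor3::threadLoop() now has.
            return false;
        }
    };

    // Usage: android::sp<OneShotThread> t = new OneShotThread();
    //        t->run("OneShot");   // runs threadLoop() once, then exits
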