Merge "Adding Metadata mode to SurfaceMediaSource"
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index b42f1c5..34f0a64 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -803,9 +803,9 @@
             printf("type '%s':\n", kMimeTypes[k]);
 
             Vector<CodecCapabilities> results;
+            // will retrieve hardware and software codecs
             CHECK_EQ(QueryCodecs(omx, kMimeTypes[k],
                                  true, // queryDecoders
-                                 false, // hwCodecOnly
                                  &results), (status_t)OK);
 
             for (size_t i = 0; i < results.size(); ++i) {
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 496b23e..1417416 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -21,6 +21,7 @@
 #include <sys/types.h>
 
 #include <media/IAudioFlinger.h>
+#include <media/IAudioPolicyService.h>
 #include <media/IEffect.h>
 #include <media/IEffectClient.h>
 #include <hardware/audio_effect.h>
@@ -111,6 +112,36 @@
 
 
     /*
+     * Returns a list of descriptors corresponding to the pre processings enabled by default
+     * on an AudioRecord with the supplied audio session ID.
+     *
+     * Parameters:
+     *      audioSession:  audio session ID.
+     *      descriptors: address where the effect descriptors should be returned.
+     *      count: as input, the maximum number of descriptors that should be returned
+     *             as output, the number of descriptors returned if status is NO_ERROR or the actual
+     *             number of enabled pre processings if status is NO_MEMORY
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *      NO_ERROR        successful operation.
+     *      NO_MEMORY       the number of descriptors to return is more than the maximum number
+     *                      indicated by count.
+     *      PERMISSION_DENIED could not get AudioFlinger interface
+     *      NO_INIT         effect library failed to initialize
+     *      BAD_VALUE       invalid audio session or descriptor pointers
+     *
+     * Returned value
+     *   *descriptors updated with descriptors of pre processings enabled by default
+     *   *count      number of descriptors returned if returned status is NO_ERROR.
+     *               total number of pre processings enabled by default if returned status is
+     *               NO_MEMORY. This happens if the count passed as input is less than the number
+     *               of descriptors to return
+     */
+    static status_t queryDefaultPreProcessing(int audioSession,
+                                              effect_descriptor_t *descriptors,
+                                              uint32_t *count);
+
+    /*
      * Events used by callback function (effect_callback_t).
      */
     enum event_type {
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 86b9f85..ed265e1 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -85,6 +85,9 @@
                                     int id) = 0;
     virtual status_t unregisterEffect(int id) = 0;
     virtual bool     isStreamActive(int stream, uint32_t inPastMs = 0) const = 0;
+    virtual status_t queryDefaultPreProcessing(int audioSession,
+                                              effect_descriptor_t *descriptors,
+                                              uint32_t *count) = 0;
 };
 
 
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 007aea6..ec84e25 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -43,7 +43,6 @@
     virtual status_t setAudioEncoder(int ae) = 0;
     virtual status_t setOutputFile(const char* path) = 0;
     virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
-    virtual status_t setOutputFileAuxiliary(int fd) = 0;
     virtual status_t setVideoSize(int width, int height) = 0;
     virtual status_t setVideoFrameRate(int frames_per_second) = 0;
     virtual status_t setParameters(const String8& params) = 0;
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 72d3736..30db642 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -215,7 +215,6 @@
     status_t    setAudioEncoder(int ae);
     status_t    setOutputFile(const char* path);
     status_t    setOutputFile(int fd, int64_t offset, int64_t length);
-    status_t    setOutputFileAuxiliary(int fd);
     status_t    setVideoSize(int width, int height);
     status_t    setVideoFrameRate(int frames_per_second);
     status_t    setParameters(const String8& params);
@@ -249,7 +248,6 @@
     bool                        mIsAudioEncoderSet;
     bool                        mIsVideoEncoderSet;
     bool                        mIsOutputFileSet;
-    bool                        mIsAuxiliaryOutputFileSet;
     Mutex                       mLock;
     Mutex                       mNotifyLock;
 };
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
index f07ebba..0e264c7 100644
--- a/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -53,27 +53,10 @@
     void startQuickReadReturns();
 
 private:
-    // If true, will use still camera takePicture() for time lapse frames
-    // If false, will use the videocamera frames instead.
-    bool mUseStillCameraForTimeLapse;
-
-    // Size of picture taken from still camera. This may be larger than the size
-    // of the video, as still camera may not support the exact video resolution
-    // demanded. See setPictureSizeToClosestSupported().
-    int32_t mPictureWidth;
-    int32_t mPictureHeight;
-
     // size of the encoded video.
     int32_t mVideoWidth;
     int32_t mVideoHeight;
 
-    // True if we need to crop the still camera image to get the video frame.
-    bool mNeedCropping;
-
-    // Start location of the cropping rectangle.
-    int32_t mCropRectStartX;
-    int32_t mCropRectStartY;
-
     // Time between capture of two frames during time lapse recording
     // Negative value indicates that timelapse is disabled.
     int64_t mTimeBetweenTimeLapseFrameCaptureUs;
@@ -84,9 +67,6 @@
     // Real timestamp of the last encoded time lapse frame
     int64_t mLastTimeLapseFrameRealTimestampUs;
 
-    // Thread id of thread which takes still picture and sleeps in a loop.
-    pthread_t mThreadTimeLapse;
-
     // Variable set in dataCallbackTimestamp() to help skipCurrentFrame()
     // to know if current frame needs to be skipped.
     bool mSkipCurrentFrame;
@@ -111,9 +91,6 @@
     // Lock for accessing quick stop variables.
     Mutex mQuickStopLock;
 
-    // Condition variable to wake up still picture thread.
-    Condition mTakePictureCondition;
-
     // mQuickStop is set to true if we use quick read() returns, otherwise it is set
     // to false. Once in this mode read() return a copy of the last read frame
     // with the same time stamp. See startQuickReadReturns().
@@ -148,32 +125,13 @@
     // Wrapper over CameraSource::read() to implement quick stop.
     virtual status_t read(MediaBuffer **buffer, const ReadOptions *options = NULL);
 
-    // For still camera case starts a thread which calls camera's takePicture()
-    // in a loop. For video camera case, just starts the camera's video recording.
-    virtual void startCameraRecording();
-
-    // For still camera case joins the thread created in startCameraRecording().
     // For video camera case, just stops the camera's video recording.
     virtual void stopCameraRecording();
 
-    // For still camera case don't need to do anything as memory is locally
-    // allocated with refcounting.
-    // For video camera case just tell the camera to release the frame.
-    virtual void releaseRecordingFrame(const sp<IMemory>& frame);
-
     // mSkipCurrentFrame is set to true in dataCallbackTimestamp() if the current
     // frame needs to be skipped and this function just returns the value of mSkipCurrentFrame.
     virtual bool skipCurrentFrame(int64_t timestampUs);
 
-    // Handles the callback to handle raw frame data from the still camera.
-    // Creates a copy of the frame data as the camera can reuse the frame memory
-    // once this callback returns. The function also sets a new timstamp corresponding
-    // to one frame time ahead of the last encoded frame's time stamp. It then
-    // calls dataCallbackTimestamp() of the base class with the copied data and the
-    // modified timestamp, which will think that it recieved the frame from a video
-    // camera and proceed as usual.
-    virtual void dataCallback(int32_t msgType, const sp<IMemory> &data);
-
     // In the video camera case calls skipFrameAndModifyTimeStamp() to modify
     // timestamp and set mSkipCurrentFrame.
     // Then it calls the base CameraSource::dataCallbackTimestamp()
@@ -189,24 +147,6 @@
     // Otherwise returns false.
     bool trySettingVideoSize(int32_t width, int32_t height);
 
-    // The still camera may not support the demanded video width and height.
-    // We look for the supported picture sizes from the still camera and
-    // choose the smallest one with either dimensions higher than the corresponding
-    // video dimensions. The still picture will be cropped to get the video frame.
-    // The function returns true if the camera supports picture sizes greater than
-    // or equal to the passed in width and height, and false otherwise.
-    bool setPictureSizeToClosestSupported(int32_t width, int32_t height);
-
-    // Computes the offset of the rectangle from where to start cropping the
-    // still image into the video frame. We choose the center of the image to be
-    // cropped. The offset is stored in (mCropRectStartX, mCropRectStartY).
-    bool computeCropRectangleOffset();
-
-    // Crops the source data into a smaller image starting at
-    // (mCropRectStartX, mCropRectStartY) and of the size of the video frame.
-    // The data is returned into a newly allocated IMemory.
-    sp<IMemory> cropYUVImage(const sp<IMemory> &source_data);
-
     // When video camera is used for time lapse capture, returns true
     // until enough time has passed for the next time lapse frame. When
     // the frame needs to be encoded, it returns false and also modifies
@@ -217,22 +157,6 @@
     // Wrapper to enter threadTimeLapseEntry()
     static void *ThreadTimeLapseWrapper(void *me);
 
-    // Runs a loop which sleeps until a still picture is required
-    // and then calls mCamera->takePicture() to take the still picture.
-    // Used only in the case mUseStillCameraForTimeLapse = true.
-    void threadTimeLapseEntry();
-
-    // Wrapper to enter threadStartPreview()
-    static void *ThreadStartPreviewWrapper(void *me);
-
-    // Starts the camera's preview.
-    void threadStartPreview();
-
-    // Starts thread ThreadStartPreviewWrapper() for restarting preview.
-    // Needs to be done in a thread so that dataCallback() which calls this function
-    // can return, and the camera can know that takePicture() is done.
-    void restartPreview();
-
     // Creates a copy of source_data into a new memory of final type MemoryBase.
     sp<IMemory> createIMemoryCopy(const sp<IMemory> &source_data);
 
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 20fcde5..2932744 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -329,6 +329,7 @@
     void restorePatchedDataPointer(BufferInfo *info);
 
     status_t applyRotation();
+    status_t waitForBufferFilled_l();
 
     int64_t retrieveDecodingTimeUs(bool isCodecSpecific);
 
@@ -348,6 +349,8 @@
 // that encode content of the given type.
 // profile and level indications only make sense for h.263, mpeg4 and avc
 // video.
+// If hwCodecOnly==true, only hardware-based components are returned;
+// otherwise both software and hardware components are returned.
 // The profile/level values correspond to
 // OMX_VIDEO_H263PROFILETYPE, OMX_VIDEO_MPEG4PROFILETYPE,
 // OMX_VIDEO_AVCPROFILETYPE, OMX_VIDEO_H263LEVELTYPE, OMX_VIDEO_MPEG4LEVELTYPE
@@ -358,6 +361,11 @@
         const char *mimeType, bool queryDecoders, bool hwCodecOnly,
         Vector<CodecCapabilities> *results);
 
+status_t QueryCodecs(
+        const sp<IOMX> &omx,
+        const char *mimeType, bool queryDecoders,
+        Vector<CodecCapabilities> *results);
+
 }  // namespace android
 
 #endif  // OMX_CODEC_H_
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 4c1358a..fab258c 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -85,7 +85,7 @@
     // SurfaceMediaSource object (i.e. they are not owned by the client).
     virtual status_t setBufferCount(int bufferCount);
 
-    virtual sp<GraphicBuffer> requestBuffer(int buf);
+    virtual status_t requestBuffer(int slot, sp<GraphicBuffer>* buf);
 
     // dequeueBuffer gets the next buffer slot index for the client to use. If a
     // buffer slot is available then that slot index is written to the location
@@ -167,13 +167,6 @@
     // when a new frame becomes available.
     void setFrameAvailableListener(const sp<FrameAvailableListener>& listener);
 
-    // getAllocator retrieves the binder object that must be referenced as long
-    // as the GraphicBuffers dequeued from this SurfaceMediaSource are referenced.
-    // Holding this binder reference prevents SurfaceFlinger from freeing the
-    // buffers before the client is done with them.
-    sp<IBinder> getAllocator();
-
-
     // getCurrentBuffer returns the buffer associated with the current image.
     sp<GraphicBuffer> getCurrentBuffer() const;
 
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index 3919551..0633744 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -419,6 +419,15 @@
     return af->getEffectDescriptor(uuid, descriptor);
 }
 
+
+status_t AudioEffect::queryDefaultPreProcessing(int audioSession,
+                                          effect_descriptor_t *descriptors,
+                                          uint32_t *count)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->queryDefaultPreProcessing(audioSession, descriptors, count);
+}
 // -------------------------------------------------------------------------
 
 status_t AudioEffect::stringToGuid(const char *str, effect_uuid_t *guid)
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 49d410f..15f4be0 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -53,6 +53,7 @@
     UNREGISTER_EFFECT,
     IS_STREAM_ACTIVE,
     GET_DEVICES_FOR_STREAM,
+    QUERY_DEFAULT_PRE_PROCESSING
 };
 
 class BpAudioPolicyService : public BpInterface<IAudioPolicyService>
@@ -321,6 +322,31 @@
         remote()->transact(IS_STREAM_ACTIVE, data, &reply);
         return reply.readInt32();
     }
+
+    virtual status_t queryDefaultPreProcessing(int audioSession,
+                                               effect_descriptor_t *descriptors,
+                                               uint32_t *count)
+    {
+        if (descriptors == NULL || count == NULL) {
+            return BAD_VALUE;
+        }
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(audioSession);
+        data.writeInt32(*count);
+        status_t status = remote()->transact(QUERY_DEFAULT_PRE_PROCESSING, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = static_cast <status_t> (reply.readInt32());
+        uint32_t retCount = reply.readInt32();
+        if (retCount != 0) {
+            uint32_t numDesc = (retCount < *count) ? retCount : *count;
+            reply.read(descriptors, sizeof(effect_descriptor_t) * numDesc);
+        }
+        *count = retCount;
+        return status;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -559,6 +585,29 @@
             return NO_ERROR;
         } break;
 
+        case QUERY_DEFAULT_PRE_PROCESSING: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            int audioSession = data.readInt32();
+            uint32_t count = data.readInt32();
+            uint32_t retCount = count;
+            effect_descriptor_t *descriptors =
+                    (effect_descriptor_t *)new char[count * sizeof(effect_descriptor_t)];
+            status_t status = queryDefaultPreProcessing(audioSession, descriptors, &retCount);
+            reply->writeInt32(status);
+            if (status != NO_ERROR && status != NO_MEMORY) {
+                retCount = 0;
+            }
+            reply->writeInt32(retCount);
+            if (retCount) {
+                if (retCount < count) {
+                    count = retCount;
+                }
+                reply->write(descriptors, sizeof(effect_descriptor_t) * count);
+            }
+            delete[] descriptors;
+            return status;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index 7e44c29..38e111e 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -46,7 +46,6 @@
     SET_AUDIO_ENCODER,
     SET_OUTPUT_FILE_PATH,
     SET_OUTPUT_FILE_FD,
-    SET_OUTPUT_FILE_AUXILIARY_FD,
     SET_VIDEO_SIZE,
     SET_VIDEO_FRAMERATE,
     SET_PARAMETERS,
@@ -177,15 +176,6 @@
         return reply.readInt32();
     }
 
-    status_t setOutputFileAuxiliary(int fd) {
-        LOGV("setOutputFileAuxiliary(%d)", fd);
-        Parcel data, reply;
-        data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
-        data.writeFileDescriptor(fd);
-        remote()->transact(SET_OUTPUT_FILE_AUXILIARY_FD, data, &reply);
-        return reply.readInt32();
-    }
-
     status_t setVideoSize(int width, int height)
     {
         LOGV("setVideoSize(%dx%d)", width, height);
@@ -404,13 +394,6 @@
             ::close(fd);
             return NO_ERROR;
         } break;
-        case SET_OUTPUT_FILE_AUXILIARY_FD: {
-            LOGV("SET_OUTPUT_FILE_AUXILIARY_FD");
-            CHECK_INTERFACE(IMediaRecorder, data, reply);
-            int fd = dup(data.readFileDescriptor());
-            reply->writeInt32(setOutputFileAuxiliary(fd));
-            return NO_ERROR;
-        } break;
         case SET_VIDEO_SIZE: {
             LOGV("SET_VIDEO_SIZE");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index fab674c..11d281f 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -322,32 +322,6 @@
     return ret;
 }
 
-status_t MediaRecorder::setOutputFileAuxiliary(int fd)
-{
-    LOGV("setOutputFileAuxiliary(%d)", fd);
-    if(mMediaRecorder == NULL) {
-        LOGE("media recorder is not initialized yet");
-        return INVALID_OPERATION;
-    }
-    if (mIsAuxiliaryOutputFileSet) {
-        LOGE("output file has already been set");
-        return INVALID_OPERATION;
-    }
-    if (!(mCurrentState & MEDIA_RECORDER_DATASOURCE_CONFIGURED)) {
-        LOGE("setOutputFile called in an invalid state(%d)", mCurrentState);
-        return INVALID_OPERATION;
-    }
-
-    status_t ret = mMediaRecorder->setOutputFileAuxiliary(fd);
-    if (OK != ret) {
-        LOGV("setOutputFileAuxiliary failed: %d", ret);
-        mCurrentState = MEDIA_RECORDER_ERROR;
-        return ret;
-    }
-    mIsAuxiliaryOutputFileSet = true;
-    return ret;
-}
-
 status_t MediaRecorder::setVideoSize(int width, int height)
 {
     LOGV("setVideoSize(%d, %d)", width, height);
@@ -629,7 +603,6 @@
     mIsAudioEncoderSet = false;
     mIsVideoEncoderSet = false;
     mIsOutputFileSet   = false;
-    mIsAuxiliaryOutputFileSet = false;
 }
 
 // Release should be OK in any state
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 905b885..6f80b35 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -178,17 +178,6 @@
     return mRecorder->setOutputFile(fd, offset, length);
 }
 
-status_t MediaRecorderClient::setOutputFileAuxiliary(int fd)
-{
-    LOGV("setOutputFileAuxiliary(%d)", fd);
-    Mutex::Autolock lock(mLock);
-    if (mRecorder == NULL) {
-        LOGE("recorder is not initialized");
-        return NO_INIT;
-    }
-    return mRecorder->setOutputFileAuxiliary(fd);
-}
-
 status_t MediaRecorderClient::setVideoSize(int width, int height)
 {
     LOGV("setVideoSize(%dx%d)", width, height);
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index c87a3c0..c9ccf22 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -41,7 +41,6 @@
     virtual     status_t   setOutputFile(const char* path);
     virtual     status_t   setOutputFile(int fd, int64_t offset,
                                                   int64_t length);
-    virtual     status_t   setOutputFileAuxiliary(int fd);
     virtual     status_t   setVideoSize(int width, int height);
     virtual     status_t   setVideoFrameRate(int frames_per_second);
     virtual     status_t   setParameters(const String8& params);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 6427bb7..6fdb726 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -28,9 +28,7 @@
 #include <media/stagefright/AMRWriter.h>
 #include <media/stagefright/AACWriter.h>
 #include <media/stagefright/CameraSource.h>
-#include <media/stagefright/VideoSourceDownSampler.h>
 #include <media/stagefright/CameraSourceTimeLapse.h>
-#include <media/stagefright/MediaSourceSplitter.h>
 #include <media/stagefright/MPEG2TSWriter.h>
 #include <media/stagefright/MPEG4Writer.h>
 #include <media/stagefright/MediaDebug.h>
@@ -67,8 +65,8 @@
 
 
 StagefrightRecorder::StagefrightRecorder()
-    : mWriter(NULL), mWriterAux(NULL),
-      mOutputFd(-1), mOutputFdAux(-1),
+    : mWriter(NULL),
+      mOutputFd(-1),
       mAudioSource(AUDIO_SOURCE_CNT),
       mVideoSource(VIDEO_SOURCE_LIST_END),
       mStarted(false), mSurfaceMediaSource(NULL) {
@@ -259,24 +257,6 @@
     return OK;
 }
 
-status_t StagefrightRecorder::setOutputFileAuxiliary(int fd) {
-    LOGV("setOutputFileAuxiliary: %d", fd);
-
-    if (fd < 0) {
-        LOGE("Invalid file descriptor: %d", fd);
-        return -EBADF;
-    }
-
-    mCaptureAuxVideo = true;
-
-    if (mOutputFdAux >= 0) {
-        ::close(mOutputFdAux);
-    }
-    mOutputFdAux = dup(fd);
-
-    return OK;
-}
-
 // Attempt to parse an int64 literal optionally surrounded by whitespace,
 // returns true on success, false otherwise.
 static bool safe_strtoi64(const char *s, int64_t *val) {
@@ -573,42 +553,6 @@
     return OK;
 }
 
-status_t StagefrightRecorder::setParamAuxVideoWidth(int32_t width) {
-    LOGV("setParamAuxVideoWidth : %d", width);
-
-    if (width <= 0) {
-        LOGE("Width (%d) is not positive", width);
-        return BAD_VALUE;
-    }
-
-    mAuxVideoWidth = width;
-    return OK;
-}
-
-status_t StagefrightRecorder::setParamAuxVideoHeight(int32_t height) {
-    LOGV("setParamAuxVideoHeight : %d", height);
-
-    if (height <= 0) {
-        LOGE("Height (%d) is not positive", height);
-        return BAD_VALUE;
-    }
-
-    mAuxVideoHeight = height;
-    return OK;
-}
-
-status_t StagefrightRecorder::setParamAuxVideoEncodingBitRate(int32_t bitRate) {
-    LOGV("StagefrightRecorder::setParamAuxVideoEncodingBitRate: %d", bitRate);
-
-    if (bitRate <= 0) {
-        LOGE("Invalid video encoding bit rate: %d", bitRate);
-        return BAD_VALUE;
-    }
-
-    mAuxVideoBitRate = bitRate;
-    return OK;
-}
-
 status_t StagefrightRecorder::setParamGeoDataLongitude(
     int32_t longitudex10000) {
 
@@ -738,21 +682,6 @@
             return setParamTimeBetweenTimeLapseFrameCapture(
                     1000LL * timeBetweenTimeLapseFrameCaptureMs);
         }
-    } else if (key == "video-aux-param-width") {
-        int32_t auxWidth;
-        if (safe_strtoi32(value.string(), &auxWidth)) {
-            return setParamAuxVideoWidth(auxWidth);
-        }
-    } else if (key == "video-aux-param-height") {
-        int32_t auxHeight;
-        if (safe_strtoi32(value.string(), &auxHeight)) {
-            return setParamAuxVideoHeight(auxHeight);
-        }
-    } else if (key == "video-aux-param-encoding-bitrate") {
-        int32_t auxVideoBitRate;
-        if (safe_strtoi32(value.string(), &auxVideoBitRate)) {
-            return setParamAuxVideoEncodingBitRate(auxVideoBitRate);
-        }
     } else {
         LOGE("setParameter: failed to find key %s", key.string());
     }
@@ -1517,7 +1446,6 @@
 }
 
 status_t StagefrightRecorder::setupMPEG4Recording(
-        bool useSplitCameraSource,
         int outputFd,
         int32_t videoWidth, int32_t videoHeight,
         int32_t videoBitRate,
@@ -1531,28 +1459,7 @@
     if (mVideoSource < VIDEO_SOURCE_LIST_END) {
 
         sp<MediaSource> mediaSource;
-        if (useSplitCameraSource) {
-            // TODO: Check if there is a better way to handle this
-            if (mVideoSource == VIDEO_SOURCE_GRALLOC_BUFFER) {
-                LOGE("Cannot use split camera when encoding frames");
-                return INVALID_OPERATION;
-            }
-            LOGV("Using Split camera source");
-            mediaSource = mCameraSourceSplitter->createClient();
-        } else {
-           err = setupMediaSource(&mediaSource);
-        }
-
-        if ((videoWidth != mVideoWidth) || (videoHeight != mVideoHeight)) {
-            // TODO: Might be able to handle downsampling even if using GRAlloc
-            if (mVideoSource == VIDEO_SOURCE_GRALLOC_BUFFER) {
-                LOGE("Cannot change size or Downsample when encoding frames");
-                return INVALID_OPERATION;
-            }
-            // Use downsampling from the original source.
-            mediaSource =
-                new VideoSourceDownSampler(mediaSource, videoWidth, videoHeight);
-        }
+        err = setupMediaSource(&mediaSource);
         if (err != OK) {
             return err;
         }
@@ -1620,24 +1527,8 @@
 }
 
 status_t StagefrightRecorder::startMPEG4Recording() {
-    if (mCaptureAuxVideo) {
-        if (!mCaptureTimeLapse) {
-            LOGE("Auxiliary video can be captured only in time lapse mode");
-            return UNKNOWN_ERROR;
-        }
-        LOGV("Creating MediaSourceSplitter");
-        sp<CameraSource> cameraSource;
-        status_t err = setupCameraSource(&cameraSource);
-        if (err != OK) {
-            return err;
-        }
-        mCameraSourceSplitter = new MediaSourceSplitter(cameraSource);
-    } else {
-        mCameraSourceSplitter = NULL;
-    }
-
     int32_t totalBitRate;
-    status_t err = setupMPEG4Recording(mCaptureAuxVideo,
+    status_t err = setupMPEG4Recording(
             mOutputFd, mVideoWidth, mVideoHeight,
             mVideoBitRate, &totalBitRate, &mWriter);
     if (err != OK) {
@@ -1653,33 +1544,6 @@
         return err;
     }
 
-    if (mCaptureAuxVideo) {
-        CHECK(mOutputFdAux >= 0);
-        if (mWriterAux != NULL) {
-            LOGE("Auxiliary File writer is not avaialble");
-            return UNKNOWN_ERROR;
-        }
-        if ((mAuxVideoWidth > mVideoWidth) || (mAuxVideoHeight > mVideoHeight) ||
-                ((mAuxVideoWidth == mVideoWidth) && mAuxVideoHeight == mVideoHeight)) {
-            LOGE("Auxiliary video size (%d x %d) same or larger than the main video size (%d x %d)",
-                    mAuxVideoWidth, mAuxVideoHeight, mVideoWidth, mVideoHeight);
-            return UNKNOWN_ERROR;
-        }
-
-        int32_t totalBitrateAux;
-        err = setupMPEG4Recording(mCaptureAuxVideo,
-                mOutputFdAux, mAuxVideoWidth, mAuxVideoHeight,
-                mAuxVideoBitRate, &totalBitrateAux, &mWriterAux);
-        if (err != OK) {
-            return err;
-        }
-
-        sp<MetaData> metaAux = new MetaData;
-        setupMPEG4MetaData(startTimeUs, totalBitrateAux, &metaAux);
-
-        return mWriterAux->start(metaAux.get());
-    }
-
     return OK;
 }
 
@@ -1690,13 +1554,6 @@
     }
     mWriter->pause();
 
-    if (mCaptureAuxVideo) {
-        if (mWriterAux == NULL) {
-            return UNKNOWN_ERROR;
-        }
-        mWriterAux->pause();
-    }
-
     if (mStarted) {
         mStarted = false;
 
@@ -1724,13 +1581,6 @@
         mCameraSourceTimeLapse = NULL;
     }
 
-    if (mCaptureAuxVideo) {
-        if (mWriterAux != NULL) {
-            mWriterAux->stop();
-            mWriterAux.clear();
-        }
-    }
-
     if (mWriter != NULL) {
         err = mWriter->stop();
         mWriter.clear();
@@ -1741,13 +1591,6 @@
         mOutputFd = -1;
     }
 
-    if (mCaptureAuxVideo) {
-        if (mOutputFdAux >= 0) {
-            ::close(mOutputFdAux);
-            mOutputFdAux = -1;
-        }
-    }
-
     if (mStarted) {
         mStarted = false;
 
@@ -1787,11 +1630,8 @@
     mVideoEncoder  = VIDEO_ENCODER_H263;
     mVideoWidth    = 176;
     mVideoHeight   = 144;
-    mAuxVideoWidth    = 176;
-    mAuxVideoHeight   = 144;
     mFrameRate     = -1;
     mVideoBitRate  = 192000;
-    mAuxVideoBitRate = 192000;
     mSampleRate    = 8000;
     mAudioChannels = 1;
     mAudioBitRate  = 12200;
@@ -1811,8 +1651,6 @@
     mTrackEveryTimeDurationUs = 0;
     mCaptureTimeLapse = false;
     mTimeBetweenTimeLapseFrameCaptureUs = -1;
-    mCaptureAuxVideo = false;
-    mCameraSourceSplitter = NULL;
     mCameraSourceTimeLapse = NULL;
     mIsMetaDataStoredInVideoBuffers = false;
     mEncoderProfiles = MediaProfiles::getInstance();
@@ -1821,7 +1659,6 @@
     mLongitudex10000 = -3600000;
 
     mOutputFd = -1;
-    mOutputFdAux = -1;
 
     return OK;
 }
@@ -1858,8 +1695,6 @@
     snprintf(buffer, SIZE, "   Recorder: %p\n", this);
     snprintf(buffer, SIZE, "   Output file (fd %d):\n", mOutputFd);
     result.append(buffer);
-    snprintf(buffer, SIZE, "   Output file Auxiliary (fd %d):\n", mOutputFdAux);
-    result.append(buffer);
     snprintf(buffer, SIZE, "     File format: %d\n", mOutputFormat);
     result.append(buffer);
     snprintf(buffer, SIZE, "     Max file size (bytes): %lld\n", mMaxFileSizeBytes);
@@ -1904,14 +1739,10 @@
     result.append(buffer);
     snprintf(buffer, SIZE, "     Frame size (pixels): %dx%d\n", mVideoWidth, mVideoHeight);
     result.append(buffer);
-    snprintf(buffer, SIZE, "     Aux Frame size (pixels): %dx%d\n", mAuxVideoWidth, mAuxVideoHeight);
-    result.append(buffer);
     snprintf(buffer, SIZE, "     Frame rate (fps): %d\n", mFrameRate);
     result.append(buffer);
     snprintf(buffer, SIZE, "     Bit rate (bps): %d\n", mVideoBitRate);
     result.append(buffer);
-    snprintf(buffer, SIZE, "     Aux Bit rate (bps): %d\n", mAuxVideoBitRate);
-    result.append(buffer);
     ::write(fd, result.string(), result.size());
     return OK;
 }
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 1618b92..5c5f05c 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -30,7 +30,6 @@
 class ICameraRecordingProxy;
 class CameraSource;
 class CameraSourceTimeLapse;
-class MediaSourceSplitter;
 struct MediaSource;
 struct MediaWriter;
 class MetaData;
@@ -55,7 +54,6 @@
     virtual status_t setPreviewSurface(const sp<Surface>& surface);
     virtual status_t setOutputFile(const char *path);
     virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
-    virtual status_t setOutputFileAuxiliary(int fd);
     virtual status_t setParameters(const String8& params);
     virtual status_t setListener(const sp<IMediaRecorderClient>& listener);
     virtual status_t prepare();
@@ -74,8 +72,8 @@
     sp<ICameraRecordingProxy> mCameraProxy;
     sp<Surface> mPreviewSurface;
     sp<IMediaRecorderClient> mListener;
-    sp<MediaWriter> mWriter, mWriterAux;
-    int mOutputFd, mOutputFdAux;
+    sp<MediaWriter> mWriter;
+    int mOutputFd;
     sp<AudioSource> mAudioSourceNode;
 
     audio_source_t mAudioSource;
@@ -85,9 +83,8 @@
     video_encoder mVideoEncoder;
     bool mUse64BitFileOffset;
     int32_t mVideoWidth, mVideoHeight;
-    int32_t mAuxVideoWidth, mAuxVideoHeight;
     int32_t mFrameRate;
-    int32_t mVideoBitRate, mAuxVideoBitRate;
+    int32_t mVideoBitRate;
     int32_t mAudioBitRate;
     int32_t mAudioChannels;
     int32_t mSampleRate;
@@ -109,8 +106,6 @@
 
     bool mCaptureTimeLapse;
     int64_t mTimeBetweenTimeLapseFrameCaptureUs;
-    bool mCaptureAuxVideo;
-    sp<MediaSourceSplitter> mCameraSourceSplitter;
     sp<CameraSourceTimeLapse> mCameraSourceTimeLapse;
 
 
@@ -127,7 +122,6 @@
     sp<SurfaceMediaSource> mSurfaceMediaSource;
 
     status_t setupMPEG4Recording(
-        bool useSplitCameraSource,
         int outputFd,
         int32_t videoWidth, int32_t videoHeight,
         int32_t videoBitRate,
@@ -166,9 +160,6 @@
     status_t setParamAudioTimeScale(int32_t timeScale);
     status_t setParamTimeLapseEnable(int32_t timeLapseEnable);
     status_t setParamTimeBetweenTimeLapseFrameCapture(int64_t timeUs);
-    status_t setParamAuxVideoHeight(int32_t height);
-    status_t setParamAuxVideoWidth(int32_t width);
-    status_t setParamAuxVideoEncodingBitRate(int32_t bitRate);
     status_t setParamVideoEncodingBitRate(int32_t bitRate);
     status_t setParamVideoIFramesInterval(int32_t seconds);
     status_t setParamVideoEncoderProfile(int32_t profile);
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index fe78c46..1ba79e5 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -24,15 +24,10 @@
 #include <media/stagefright/CameraSourceTimeLapse.h>
 #include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/YUVImage.h>
-#include <media/stagefright/YUVCanvas.h>
 #include <camera/Camera.h>
 #include <camera/CameraParameters.h>
-#include <ui/Rect.h>
 #include <utils/String8.h>
 #include <utils/Vector.h>
-#include "OMX_Video.h"
-#include <limits.h>
 
 namespace android {
 
@@ -74,20 +69,14 @@
       mLastTimeLapseFrameRealTimestampUs(0),
       mSkipCurrentFrame(false) {
 
-    LOGD("starting time lapse mode: %lld us", mTimeBetweenTimeLapseFrameCaptureUs);
+    LOGD("starting time lapse mode: %lld us",
+        mTimeBetweenTimeLapseFrameCaptureUs);
+
     mVideoWidth = videoSize.width;
     mVideoHeight = videoSize.height;
 
-    if (trySettingVideoSize(videoSize.width, videoSize.height)) {
-        mUseStillCameraForTimeLapse = false;
-    } else {
-        // TODO: Add a check to see that mTimeBetweenTimeLapseFrameCaptureUs is greater
-        // than the fastest rate at which the still camera can take pictures.
-        mUseStillCameraForTimeLapse = true;
-        CHECK(setPictureSizeToClosestSupported(videoSize.width, videoSize.height));
-        mNeedCropping = computeCropRectangleOffset();
-        mMeta->setInt32(kKeyWidth, videoSize.width);
-        mMeta->setInt32(kKeyHeight, videoSize.height);
+    if (!trySettingVideoSize(videoSize.width, videoSize.height)) {
+        mInitCheck = NO_INIT;
     }
 
     // Initialize quick stop variables.
@@ -101,24 +90,22 @@
 }
 
 void CameraSourceTimeLapse::startQuickReadReturns() {
+    LOGV("startQuickReadReturns");
     Mutex::Autolock autoLock(mQuickStopLock);
-    LOGV("Enabling quick read returns");
 
     // Enable quick stop mode.
     mQuickStop = true;
 
-    if (mUseStillCameraForTimeLapse) {
-        // wake up the thread right away.
-        mTakePictureCondition.signal();
-    } else {
-        // Force dataCallbackTimestamp() coming from the video camera to not skip the
-        // next frame as we want read() to get a get a frame right away.
-        mForceRead = true;
-    }
+    // Force dataCallbackTimestamp() coming from the video camera to
+    // not skip the next frame as we want read() to get a frame
+    // right away.
+    mForceRead = true;
 }
 
-bool CameraSourceTimeLapse::trySettingVideoSize(int32_t width, int32_t height) {
-    LOGV("trySettingVideoSize: %dx%d", width, height);
+bool CameraSourceTimeLapse::trySettingVideoSize(
+        int32_t width, int32_t height) {
+
+    LOGV("trySettingVideoSize");
     int64_t token = IPCThreadState::self()->clearCallingIdentity();
     String8 s = mCamera->getParameters();
 
@@ -162,53 +149,8 @@
     return isSuccessful;
 }
 
-bool CameraSourceTimeLapse::setPictureSizeToClosestSupported(int32_t width, int32_t height) {
-    LOGV("setPictureSizeToClosestSupported: %dx%d", width, height);
-    int64_t token = IPCThreadState::self()->clearCallingIdentity();
-    String8 s = mCamera->getParameters();
-    IPCThreadState::self()->restoreCallingIdentity(token);
-
-    CameraParameters params(s);
-    Vector<Size> supportedSizes;
-    params.getSupportedPictureSizes(supportedSizes);
-
-    int32_t minPictureSize = INT_MAX;
-    for (uint32_t i = 0; i < supportedSizes.size(); ++i) {
-        int32_t pictureWidth = supportedSizes[i].width;
-        int32_t pictureHeight = supportedSizes[i].height;
-
-        if ((pictureWidth >= width) && (pictureHeight >= height)) {
-            int32_t pictureSize = pictureWidth*pictureHeight;
-            if (pictureSize < minPictureSize) {
-                minPictureSize = pictureSize;
-                mPictureWidth = pictureWidth;
-                mPictureHeight = pictureHeight;
-            }
-        }
-    }
-    LOGV("Picture size = (%d, %d)", mPictureWidth, mPictureHeight);
-    return (minPictureSize != INT_MAX);
-}
-
-bool CameraSourceTimeLapse::computeCropRectangleOffset() {
-    if ((mPictureWidth == mVideoWidth) && (mPictureHeight == mVideoHeight)) {
-        return false;
-    }
-
-    CHECK((mPictureWidth > mVideoWidth) && (mPictureHeight > mVideoHeight));
-
-    int32_t widthDifference = mPictureWidth - mVideoWidth;
-    int32_t heightDifference = mPictureHeight - mVideoHeight;
-
-    mCropRectStartX = widthDifference/2;
-    mCropRectStartY = heightDifference/2;
-
-    LOGV("setting crop rectangle offset to (%d, %d)", mCropRectStartX, mCropRectStartY);
-
-    return true;
-}
-
 void CameraSourceTimeLapse::signalBufferReturned(MediaBuffer* buffer) {
+    LOGV("signalBufferReturned");
     Mutex::Autolock autoLock(mQuickStopLock);
     if (mQuickStop && (buffer == mLastReadBufferCopy)) {
         buffer->setObserver(NULL);
@@ -218,7 +160,12 @@
     }
 }
 
-void createMediaBufferCopy(const MediaBuffer& sourceBuffer, int64_t frameTime, MediaBuffer **newBuffer) {
+void createMediaBufferCopy(
+        const MediaBuffer& sourceBuffer,
+        int64_t frameTime,
+        MediaBuffer **newBuffer) {
+
+    LOGV("createMediaBufferCopy");
     size_t sourceSize = sourceBuffer.size();
     void* sourcePointer = sourceBuffer.data();
 
@@ -229,6 +176,7 @@
 }
 
 void CameraSourceTimeLapse::fillLastReadBufferCopy(MediaBuffer& sourceBuffer) {
+    LOGV("fillLastReadBufferCopy");
     int64_t frameTime;
     CHECK(sourceBuffer.meta_data()->findInt64(kKeyTime, &frameTime));
     createMediaBufferCopy(sourceBuffer, frameTime, &mLastReadBufferCopy);
@@ -238,11 +186,12 @@
 
 status_t CameraSourceTimeLapse::read(
         MediaBuffer **buffer, const ReadOptions *options) {
+    LOGV("read");
     if (mLastReadBufferCopy == NULL) {
         mLastReadStatus = CameraSource::read(buffer, options);
 
-        // mQuickStop may have turned to true while read was blocked. Make a copy of
-        // the buffer in that case.
+        // mQuickStop may have turned to true while read was blocked.
+        // Make a copy of the buffer in that case.
         Mutex::Autolock autoLock(mQuickStopLock);
         if (mQuickStop && *buffer) {
             fillLastReadBufferCopy(**buffer);
@@ -255,105 +204,19 @@
     }
 }
 
-// static
-void *CameraSourceTimeLapse::ThreadTimeLapseWrapper(void *me) {
-    CameraSourceTimeLapse *source = static_cast<CameraSourceTimeLapse *>(me);
-    source->threadTimeLapseEntry();
-    return NULL;
-}
-
-void CameraSourceTimeLapse::threadTimeLapseEntry() {
-    while (mStarted) {
-        {
-            Mutex::Autolock autoLock(mCameraIdleLock);
-            if (!mCameraIdle) {
-                mCameraIdleCondition.wait(mCameraIdleLock);
-            }
-            CHECK(mCameraIdle);
-            mCameraIdle = false;
-        }
-
-        // Even if mQuickStop == true we need to take one more picture
-        // as a read() may be blocked, waiting for a frame to get available.
-        // After this takePicture, if mQuickStop == true, we can safely exit
-        // this thread as read() will make a copy of this last frame and keep
-        // returning it in the quick stop mode.
-        Mutex::Autolock autoLock(mQuickStopLock);
-        CHECK_EQ(OK, mCamera->takePicture(CAMERA_MSG_RAW_IMAGE));
-        if (mQuickStop) {
-            LOGV("threadTimeLapseEntry: Exiting due to mQuickStop = true");
-            return;
-        }
-        mTakePictureCondition.waitRelative(mQuickStopLock,
-                mTimeBetweenTimeLapseFrameCaptureUs * 1000);
-    }
-    LOGV("threadTimeLapseEntry: Exiting due to mStarted = false");
-}
-
-void CameraSourceTimeLapse::startCameraRecording() {
-    if (mUseStillCameraForTimeLapse) {
-        LOGV("start time lapse recording using still camera");
-
-        int64_t token = IPCThreadState::self()->clearCallingIdentity();
-        String8 s = mCamera->getParameters();
-
-        CameraParameters params(s);
-        params.setPictureSize(mPictureWidth, mPictureHeight);
-        mCamera->setParameters(params.flatten());
-        mCameraIdle = true;
-        mStopWaitingForIdleCamera = false;
-
-        // disable shutter sound and play the recording sound.
-        mCamera->sendCommand(CAMERA_CMD_ENABLE_SHUTTER_SOUND, 0, 0);
-        mCamera->sendCommand(CAMERA_CMD_PLAY_RECORDING_SOUND, 0, 0);
-        IPCThreadState::self()->restoreCallingIdentity(token);
-
-        // create a thread which takes pictures in a loop
-        pthread_attr_t attr;
-        pthread_attr_init(&attr);
-        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
-
-        pthread_create(&mThreadTimeLapse, &attr, ThreadTimeLapseWrapper, this);
-        pthread_attr_destroy(&attr);
-    } else {
-        LOGV("start time lapse recording using video camera");
-        CameraSource::startCameraRecording();
-    }
-}
-
 void CameraSourceTimeLapse::stopCameraRecording() {
-    if (mUseStillCameraForTimeLapse) {
-        void *dummy;
-        pthread_join(mThreadTimeLapse, &dummy);
-
-        // Last takePicture may still be underway. Wait for the camera to get
-        // idle.
-        Mutex::Autolock autoLock(mCameraIdleLock);
-        mStopWaitingForIdleCamera = true;
-        if (!mCameraIdle) {
-            mCameraIdleCondition.wait(mCameraIdleLock);
-        }
-        CHECK(mCameraIdle);
-        mCamera->setListener(NULL);
-
-        // play the recording sound.
-        mCamera->sendCommand(CAMERA_CMD_PLAY_RECORDING_SOUND, 0, 0);
-    } else {
-        CameraSource::stopCameraRecording();
-    }
+    LOGV("stopCameraRecording");
+    CameraSource::stopCameraRecording();
     if (mLastReadBufferCopy) {
         mLastReadBufferCopy->release();
         mLastReadBufferCopy = NULL;
     }
 }
 
-void CameraSourceTimeLapse::releaseRecordingFrame(const sp<IMemory>& frame) {
-    if (!mUseStillCameraForTimeLapse) {
-        CameraSource::releaseRecordingFrame(frame);
-    }
-}
+sp<IMemory> CameraSourceTimeLapse::createIMemoryCopy(
+        const sp<IMemory> &source_data) {
 
-sp<IMemory> CameraSourceTimeLapse::createIMemoryCopy(const sp<IMemory> &source_data) {
+    LOGV("createIMemoryCopy");
     size_t source_size = source_data->size();
     void* source_pointer = source_data->pointer();
 
@@ -363,102 +226,8 @@
     return newMemory;
 }
 
-// Allocates IMemory of final type MemoryBase with the given size.
-sp<IMemory> allocateIMemory(size_t size) {
-    sp<MemoryHeapBase> newMemoryHeap = new MemoryHeapBase(size);
-    sp<MemoryBase> newMemory = new MemoryBase(newMemoryHeap, 0, size);
-    return newMemory;
-}
-
-// static
-void *CameraSourceTimeLapse::ThreadStartPreviewWrapper(void *me) {
-    CameraSourceTimeLapse *source = static_cast<CameraSourceTimeLapse *>(me);
-    source->threadStartPreview();
-    return NULL;
-}
-
-void CameraSourceTimeLapse::threadStartPreview() {
-    CHECK_EQ(OK, mCamera->startPreview());
-    Mutex::Autolock autoLock(mCameraIdleLock);
-    mCameraIdle = true;
-    mCameraIdleCondition.signal();
-}
-
-void CameraSourceTimeLapse::restartPreview() {
-    // Start this in a different thread, so that the dataCallback can return
-    LOGV("restartPreview");
-    pthread_attr_t attr;
-    pthread_attr_init(&attr);
-    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
-
-    pthread_t threadPreview;
-    pthread_create(&threadPreview, &attr, ThreadStartPreviewWrapper, this);
-    pthread_attr_destroy(&attr);
-}
-
-sp<IMemory> CameraSourceTimeLapse::cropYUVImage(const sp<IMemory> &source_data) {
-    // find the YUV format
-    int32_t srcFormat;
-    CHECK(mMeta->findInt32(kKeyColorFormat, &srcFormat));
-    YUVImage::YUVFormat yuvFormat;
-    if (srcFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
-        yuvFormat = YUVImage::YUV420SemiPlanar;
-    } else {
-        CHECK_EQ(srcFormat, OMX_COLOR_FormatYUV420Planar);
-        yuvFormat = YUVImage::YUV420Planar;
-    }
-
-    // allocate memory for cropped image and setup a canvas using it.
-    sp<IMemory> croppedImageMemory = allocateIMemory(
-            YUVImage::bufferSize(yuvFormat, mVideoWidth, mVideoHeight));
-    YUVImage yuvImageCropped(yuvFormat,
-            mVideoWidth, mVideoHeight,
-            (uint8_t *)croppedImageMemory->pointer());
-    YUVCanvas yuvCanvasCrop(yuvImageCropped);
-
-    YUVImage yuvImageSource(yuvFormat,
-            mPictureWidth, mPictureHeight,
-            (uint8_t *)source_data->pointer());
-    yuvCanvasCrop.CopyImageRect(
-            Rect(mCropRectStartX, mCropRectStartY,
-                mCropRectStartX + mVideoWidth,
-                mCropRectStartY + mVideoHeight),
-            0, 0,
-            yuvImageSource);
-
-    return croppedImageMemory;
-}
-
-void CameraSourceTimeLapse::dataCallback(int32_t msgType, const sp<IMemory> &data) {
-    if (msgType == CAMERA_MSG_COMPRESSED_IMAGE) {
-        // takePicture will complete after this callback, so restart preview.
-        restartPreview();
-        return;
-    }
-    if (msgType != CAMERA_MSG_RAW_IMAGE) {
-        return;
-    }
-
-    LOGV("dataCallback for timelapse still frame");
-    CHECK_EQ(true, mUseStillCameraForTimeLapse);
-
-    int64_t timestampUs;
-    if (mNumFramesReceived == 0) {
-        timestampUs = mStartTimeUs;
-    } else {
-        timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
-    }
-
-    if (mNeedCropping) {
-        sp<IMemory> croppedImageData = cropYUVImage(data);
-        dataCallbackTimestamp(timestampUs, msgType, croppedImageData);
-    } else {
-        sp<IMemory> dataCopy = createIMemoryCopy(data);
-        dataCallbackTimestamp(timestampUs, msgType, dataCopy);
-    }
-}
-
 bool CameraSourceTimeLapse::skipCurrentFrame(int64_t timestampUs) {
+    LOGV("skipCurrentFrame");
     if (mSkipCurrentFrame) {
         mSkipCurrentFrame = false;
         return true;
@@ -468,72 +237,58 @@
 }
 
 bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) {
-    if (!mUseStillCameraForTimeLapse) {
-        if (mLastTimeLapseFrameRealTimestampUs == 0) {
-            // First time lapse frame. Initialize mLastTimeLapseFrameRealTimestampUs
-            // to current time (timestampUs) and save frame data.
-            LOGV("dataCallbackTimestamp timelapse: initial frame");
+    LOGV("skipFrameAndModifyTimeStamp");
+    if (mLastTimeLapseFrameRealTimestampUs == 0) {
+        // First time lapse frame. Initialize mLastTimeLapseFrameRealTimestampUs
+        // to current time (timestampUs) and save frame data.
+        LOGV("dataCallbackTimestamp timelapse: initial frame");
 
-            mLastTimeLapseFrameRealTimestampUs = *timestampUs;
+        mLastTimeLapseFrameRealTimestampUs = *timestampUs;
+        return false;
+    }
+
+    {
+        Mutex::Autolock autoLock(mQuickStopLock);
+
+        // mForceRead may be set to true by startQuickReadReturns(). In that
+        // case don't skip this frame.
+        if (mForceRead) {
+            LOGV("dataCallbackTimestamp timelapse: forced read");
+            mForceRead = false;
+            *timestampUs =
+                mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
             return false;
         }
+    }
 
-        {
-            Mutex::Autolock autoLock(mQuickStopLock);
+    // Workaround to bypass the first 2 input frames for skipping.
+    // The first 2 output frames from the encoder are: decoder specific info and
+    // the compressed video frame data for the first input video frame.
+    if (mNumFramesEncoded >= 1 && *timestampUs <
+        (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenTimeLapseFrameCaptureUs)) {
+        // Skip all frames from last encoded frame until
+        // sufficient time (mTimeBetweenTimeLapseFrameCaptureUs) has passed.
+        // Tell the camera to release its recording frame and return.
+        LOGV("dataCallbackTimestamp timelapse: skipping intermediate frame");
+        return true;
+    } else {
+        // Desired frame has arrived after mTimeBetweenTimeLapseFrameCaptureUs time:
+        // - Reset mLastTimeLapseFrameRealTimestampUs to current time.
+        // - Artificially modify timestampUs to be one frame time (1/framerate) ahead
+        // of the last encoded frame's time stamp.
+        LOGV("dataCallbackTimestamp timelapse: got timelapse frame");
 
-            // mForceRead may be set to true by startQuickReadReturns(). In that
-            // case don't skip this frame.
-            if (mForceRead) {
-                LOGV("dataCallbackTimestamp timelapse: forced read");
-                mForceRead = false;
-                *timestampUs =
-                    mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
-                return false;
-            }
-        }
-
-        // Workaround to bypass the first 2 input frames for skipping.
-        // The first 2 output frames from the encoder are: decoder specific info and
-        // the compressed video frame data for the first input video frame.
-        if (mNumFramesEncoded >= 1 && *timestampUs <
-                (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenTimeLapseFrameCaptureUs)) {
-            // Skip all frames from last encoded frame until
-            // sufficient time (mTimeBetweenTimeLapseFrameCaptureUs) has passed.
-            // Tell the camera to release its recording frame and return.
-            LOGV("dataCallbackTimestamp timelapse: skipping intermediate frame");
-            return true;
-        } else {
-            // Desired frame has arrived after mTimeBetweenTimeLapseFrameCaptureUs time:
-            // - Reset mLastTimeLapseFrameRealTimestampUs to current time.
-            // - Artificially modify timestampUs to be one frame time (1/framerate) ahead
-            // of the last encoded frame's time stamp.
-            LOGV("dataCallbackTimestamp timelapse: got timelapse frame");
-
-            mLastTimeLapseFrameRealTimestampUs = *timestampUs;
-            *timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
-            return false;
-        }
+        mLastTimeLapseFrameRealTimestampUs = *timestampUs;
+        *timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
+        return false;
     }
     return false;
 }
 
 void CameraSourceTimeLapse::dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
             const sp<IMemory> &data) {
-    if (!mUseStillCameraForTimeLapse) {
-        mSkipCurrentFrame = skipFrameAndModifyTimeStamp(&timestampUs);
-    } else {
-        Mutex::Autolock autoLock(mCameraIdleLock);
-        // If we are using the still camera and stop() has been called, it may
-        // be waiting for the camera to get idle. In that case return
-        // immediately. Calling CameraSource::dataCallbackTimestamp() will lead
-        // to a deadlock since it tries to access CameraSource::mLock which in
-        // this case is held by CameraSource::stop() currently waiting for the
-        // camera to get idle. And camera will not get idle until this call
-        // returns.
-        if (mStopWaitingForIdleCamera) {
-            return;
-        }
-    }
+    LOGV("dataCallbackTimestamp");
+    mSkipCurrentFrame = skipFrameAndModifyTimeStamp(&timestampUs);
     CameraSource::dataCallbackTimestamp(timestampUs, msgType, data);
 }
 
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 7bcbdcf..ac73351 100755
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -48,6 +48,10 @@
 
 namespace android {
 
+// Treat time out as an error if we have not received any output buffers
+// after 3 seconds. NOTE(review): waitRelative takes ns, despite the "Us" suffix — confirm.
+const static int64_t kBufferFilledEventTimeOutUs = 3000000000LL;
+
 struct CodecInfo {
     const char *mime;
     const char *codec;
@@ -3191,6 +3195,16 @@
     mBufferFilled.signal();
 }
 
+status_t OMXCodec::waitForBufferFilled_l() {
+    status_t err = mBufferFilled.waitRelative(mLock, kBufferFilledEventTimeOutUs);
+    if (err != OK) {
+        LOGE("Timed out waiting for buffers from video encoder: %d/%d",
+            countBuffersWeOwn(mPortBuffers[kPortIndexInput]),
+            countBuffersWeOwn(mPortBuffers[kPortIndexOutput]));
+    }
+    return err;
+}
+
 void OMXCodec::setRawAudioFormat(
         OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels) {
 
@@ -3623,6 +3637,7 @@
 
 status_t OMXCodec::read(
         MediaBuffer **buffer, const ReadOptions *options) {
+    status_t err = OK;
     *buffer = NULL;
 
     Mutex::Autolock autoLock(mLock);
@@ -3663,7 +3678,9 @@
 
     if (seeking) {
         while (mState == RECONFIGURING) {
-            mBufferFilled.wait(mLock);
+            if ((err = waitForBufferFilled_l()) != OK) {
+                return err;
+            }
         }
 
         if (mState != EXECUTING) {
@@ -3694,19 +3711,15 @@
         }
 
         while (mSeekTimeUs >= 0) {
-            mBufferFilled.wait(mLock);
+            if ((err = waitForBufferFilled_l()) != OK) {
+                return err;
+            }
         }
     }
 
     while (mState != ERROR && !mNoMoreOutputData && mFilledBuffers.empty()) {
-        if (mIsEncoder) {
-            if (NO_ERROR != mBufferFilled.waitRelative(mLock, 3000000000LL)) {
-                LOGW("Timed out waiting for buffers from video encoder: %d/%d",
-                    countBuffersWeOwn(mPortBuffers[kPortIndexInput]),
-                    countBuffersWeOwn(mPortBuffers[kPortIndexOutput]));
-            }
-        } else {
-            mBufferFilled.wait(mLock);
+        if ((err = waitForBufferFilled_l()) != OK) {
+            return err;
         }
     }
 
@@ -4415,6 +4428,13 @@
     return OK;
 }
 
+status_t QueryCodecs(
+        const sp<IOMX> &omx,
+        const char *mimeType, bool queryDecoders,
+        Vector<CodecCapabilities> *results) {
+    return QueryCodecs(omx, mimeType, queryDecoders, false /*hwCodecOnly*/, results);
+}
+
 void OMXCodec::restorePatchedDataPointer(BufferInfo *info) {
     CHECK(mIsEncoder && (mQuirks & kAvoidMemcopyInputRecordingFrames));
     CHECK(mOMXLivesLocally);
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 1e682c3..3d8c56a 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -148,16 +148,17 @@
     return OK;
 }
 
-sp<GraphicBuffer> SurfaceMediaSource::requestBuffer(int buf) {
+status_t SurfaceMediaSource::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
     LOGV("SurfaceMediaSource::requestBuffer");
     Mutex::Autolock lock(mMutex);
-    if (buf < 0 || mBufferCount <= buf) {
+    if (slot < 0 || mBufferCount <= slot) {
         LOGE("requestBuffer: slot index out of range [0, %d]: %d",
-                mBufferCount, buf);
-        return 0;
+                mBufferCount, slot);
+        return BAD_VALUE;
     }
-    mSlots[buf].mRequestBufferCalled = true;
-    return mSlots[buf].mGraphicBuffer;
+    mSlots[slot].mRequestBufferCalled = true;
+    *buf = mSlots[slot].mGraphicBuffer;
+    return NO_ERROR;
 }
 
 status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,
@@ -526,12 +527,6 @@
     mFrameAvailableListener = listener;
 }
 
-sp<IBinder> SurfaceMediaSource::getAllocator() {
-    LOGV("getAllocator");
-    return mGraphicBufferAlloc->asBinder();
-}
-
-
 void SurfaceMediaSource::freeAllBuffers() {
     LOGV("freeAllBuffers");
     for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index bf978d7..c406964 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -370,7 +370,9 @@
 
             int16_t *dst = (int16_t *)tmp->data();
             const uint8_t *src = (const uint8_t *)buffer->data();
-            while (n-- > 0) {
+            ssize_t numBytes = n;
+
+            while (numBytes-- > 0) {
                 *dst++ = ((int16_t)(*src) - 128) * 256;
                 ++src;
             }
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 0323fe0..cb1f921 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1043,6 +1043,25 @@
     return NO_ERROR;
 }
 
+status_t AudioFlinger::ThreadBase::dumpEffectChains(int fd, const Vector<String16>& args)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "\n- %d Effect Chains:\n", mEffectChains.size());
+    write(fd, buffer, strlen(buffer));
+
+    for (size_t i = 0; i < mEffectChains.size(); ++i) {
+        sp<EffectChain> chain = mEffectChains[i];
+        if (chain != 0) {
+            chain->dump(fd, args);
+        }
+    }
+    return NO_ERROR;
+}
+
+
 // ----------------------------------------------------------------------------
 
 AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
@@ -1111,24 +1130,6 @@
     return NO_ERROR;
 }
 
-status_t AudioFlinger::PlaybackThread::dumpEffectChains(int fd, const Vector<String16>& args)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "\n- %d Effect Chains:\n", mEffectChains.size());
-    write(fd, buffer, strlen(buffer));
-
-    for (size_t i = 0; i < mEffectChains.size(); ++i) {
-        sp<EffectChain> chain = mEffectChains[i];
-        if (chain != 0) {
-            chain->dump(fd, args);
-        }
-    }
-    return NO_ERROR;
-}
-
 status_t AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
 {
     const size_t SIZE = 256;
@@ -4178,6 +4179,7 @@
     write(fd, result.string(), result.size());
 
     dumpBase(fd, args);
+    dumpEffectChains(fd, args);
 
     return NO_ERROR;
 }
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index fff4f06..e2cf946 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -285,6 +285,7 @@
         };
 
         status_t dumpBase(int fd, const Vector<String16>& args);
+        status_t dumpEffectChains(int fd, const Vector<String16>& args);
 
         // base for record and playback
         class TrackBase : public AudioBufferProvider, public RefBase {
@@ -724,7 +725,6 @@
 
         virtual status_t    dumpInternals(int fd, const Vector<String16>& args);
         status_t    dumpTracks(int fd, const Vector<String16>& args);
-        status_t    dumpEffectChains(int fd, const Vector<String16>& args);
 
         SortedVector< sp<Track> >       mTracks;
         // mStreamTypes[] uses 1 additionnal stream type internally for the OutputTrack used by DuplicatingThread
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index dd1e153..6d06d83 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -497,6 +497,43 @@
     return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
 }
 
+status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
+                                                       effect_descriptor_t *descriptors,
+                                                       uint32_t *count)
+{
+
+    if (mpAudioPolicy == NULL) {
+        *count = 0;
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+    status_t status = NO_ERROR;
+
+    size_t index;
+    for (index = 0; index < mInputs.size(); index++) {
+        if (mInputs.valueAt(index)->mSessionId == audioSession) {
+            break;
+        }
+    }
+    if (index == mInputs.size()) {
+        *count = 0;
+        return BAD_VALUE;
+    }
+    Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects;
+
+    for (size_t i = 0; i < effects.size(); i++) {
+        effect_descriptor_t desc = effects[i]->descriptor();
+        if (i < *count) {
+            memcpy(descriptors + i, &desc, sizeof(effect_descriptor_t));
+        }
+    }
+    if (effects.size() > *count) {
+        status = NO_MEMORY;
+    }
+    *count = effects.size();
+    return status;
+}
+
 void AudioPolicyService::binderDied(const wp<IBinder>& who) {
     LOGW("binderDied() %p, tid %d, calling tid %d", who.unsafe_get(), gettid(),
             IPCThreadState::self()->getCallingPid());
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index 62ad29e..834b794 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -104,6 +104,9 @@
     virtual status_t unregisterEffect(int id);
     virtual bool isStreamActive(int stream, uint32_t inPastMs = 0) const;
 
+    virtual status_t queryDefaultPreProcessing(int audioSession,
+                                              effect_descriptor_t *descriptors,
+                                              uint32_t *count);
     virtual     status_t    onTransact(
                                 uint32_t code,
                                 const Parcel& data,