Merge "StagefrightRecorder: webm (video only) support" into lmp-dev
diff --git a/include/media/AudioPolicyHelper.h b/include/media/AudioPolicyHelper.h
new file mode 100644
index 0000000..f4afd45
--- /dev/null
+++ b/include/media/AudioPolicyHelper.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef AUDIO_POLICY_HELPER_H_
+#define AUDIO_POLICY_HELPER_H_
+
+#include <system/audio.h>
+
+audio_stream_type_t audio_attributes_to_stream_type(const audio_attributes_t *attr)
+{
+    // flags to stream type mapping
+    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
+        return AUDIO_STREAM_ENFORCED_AUDIBLE;
+    }
+    if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
+        return AUDIO_STREAM_BLUETOOTH_SCO;
+    }
+
+    // usage to stream type mapping
+    switch (attr->usage) {
+    case AUDIO_USAGE_MEDIA:
+    case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+        return AUDIO_STREAM_MUSIC;
+    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+        return AUDIO_STREAM_SYSTEM;
+    case AUDIO_USAGE_VOICE_COMMUNICATION:
+        return AUDIO_STREAM_VOICE_CALL;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+        return AUDIO_STREAM_DTMF;
+
+    case AUDIO_USAGE_ALARM:
+        return AUDIO_STREAM_ALARM;
+    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+        return AUDIO_STREAM_RING;
+
+    case AUDIO_USAGE_NOTIFICATION:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+    case AUDIO_USAGE_NOTIFICATION_EVENT:
+        return AUDIO_STREAM_NOTIFICATION;
+
+    case AUDIO_USAGE_UNKNOWN:
+    default:
+        return AUDIO_STREAM_MUSIC;
+    }
+}
+
+#endif //AUDIO_POLICY_HELPER_H_
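
A minimal usage sketch of the new helper, with illustrative attribute values; SoundPool later in this change uses the same mapping to derive a legacy stream type from client-supplied attributes. The function name below is hypothetical.

    #include <string.h>
    #include <system/audio.h>
    #include <media/AudioPolicyHelper.h>

    static audio_stream_type_t streamTypeForNotification() {
        audio_attributes_t attr;
        memset(&attr, 0, sizeof(attr));
        attr.usage = AUDIO_USAGE_NOTIFICATION;   // illustrative usage value
        attr.flags = AUDIO_FLAG_NONE;
        // flags are checked first, then usage; this returns AUDIO_STREAM_NOTIFICATION
        return audio_attributes_to_stream_type(&attr);
    }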
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index f9c7efd..4edc1bf 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -461,6 +461,7 @@
     // for notification APIs
     uint32_t                mNotificationFramesReq; // requested number of frames between each
                                                     // notification callback
+                                                    // as specified in constructor or set()
     uint32_t                mNotificationFramesAct; // actual number of frames between each
                                                     // notification callback
     bool                    mRefreshRemaining;      // processAudioBuffer() should refresh
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index e1aab41..9ea18de 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -234,7 +234,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int sessionId);
+                                    int sessionId,
+                                    audio_input_flags_t);
 
     static status_t startInput(audio_io_handle_t input);
     static status_t stopInput(audio_io_handle_t input);
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index a8f4605..31312d3 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -163,7 +163,8 @@
                                         audio_devices_t *pDevices,
                                         uint32_t *pSamplingRate,
                                         audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask) = 0;
+                                        audio_channel_mask_t *pChannelMask,
+                                        audio_input_flags_t flags) = 0;
     virtual status_t closeInput(audio_io_handle_t input) = 0;
 
     virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 959e4c3..e08b5ae 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -70,10 +70,11 @@
                                 int session = 0) = 0;
     virtual void releaseOutput(audio_io_handle_t output) = 0;
     virtual audio_io_handle_t getInput(audio_source_t inputSource,
-                                    uint32_t samplingRate = 0,
-                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                    audio_channel_mask_t channelMask = 0,
-                                    int audioSession = 0) = 0;
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    int audioSession,
+                                    audio_input_flags_t flags) = 0;
     virtual status_t startInput(audio_io_handle_t input) = 0;
     virtual status_t stopInput(audio_io_handle_t input) = 0;
     virtual void releaseInput(audio_io_handle_t input) = 0;
diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h
index 2dd78cc..5830475 100644
--- a/include/media/SoundPool.h
+++ b/include/media/SoundPool.h
@@ -167,7 +167,7 @@
     friend class SoundPoolThread;
     friend class SoundChannel;
 public:
-    SoundPool(int maxChannels, audio_stream_type_t streamType, int srcQuality);
+    SoundPool(int maxChannels, const audio_attributes_t* pAttributes);
     ~SoundPool();
     int load(const char* url, int priority);
     int load(int fd, int64_t offset, int64_t length, int priority);
@@ -183,8 +183,7 @@
     void setPriority(int channelID, int priority);
     void setLoop(int channelID, int loop);
     void setRate(int channelID, float rate);
-    audio_stream_type_t streamType() const { return mStreamType; }
-    int srcQuality() const { return mSrcQuality; }
+    const audio_attributes_t* attributes() { return &mAttributes; }
 
     // called from SoundPoolThread
     void sampleLoaded(int sampleID);
@@ -225,8 +224,7 @@
     List<SoundChannel*>     mStop;
     DefaultKeyedVector< int, sp<Sample> >   mSamples;
     int                     mMaxChannels;
-    audio_stream_type_t     mStreamType;
-    int                     mSrcQuality;
+    audio_attributes_t      mAttributes;
     int                     mAllocated;
     int                     mNextSampleID;
     int                     mNextChannelID;
diff --git a/include/media/stagefright/MediaBufferGroup.h b/include/media/stagefright/MediaBufferGroup.h
index 0488292..a006f7f 100644
--- a/include/media/stagefright/MediaBufferGroup.h
+++ b/include/media/stagefright/MediaBufferGroup.h
@@ -34,9 +34,12 @@
 
     void add_buffer(MediaBuffer *buffer);
 
-    // Blocks until a buffer is available and returns it to the caller,
-    // the returned buffer will have a reference count of 1.
-    status_t acquire_buffer(MediaBuffer **buffer);
+    // If nonBlocking is false, it blocks until a buffer is available and
+    // passes it to the caller in *buffer, while returning OK.
+    // The returned buffer will have a reference count of 1.
+    // If nonBlocking is true and a buffer is not immediately available,
+    // *buffer is set to NULL and it returns WOULD_BLOCK.
+    status_t acquire_buffer(MediaBuffer **buffer, bool nonBlocking = false);
 
 protected:
     virtual void signalBufferReturned(MediaBuffer *buffer);
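
A caller-side sketch of the two acquisition modes described in the comment above; the function name and buffer handling here are illustrative.

    #include <media/stagefright/MediaBuffer.h>
    #include <media/stagefright/MediaBufferGroup.h>

    void drainOne(MediaBufferGroup &group) {
        MediaBuffer *buf = NULL;

        // Non-blocking: if nothing is free, buf stays NULL and WOULD_BLOCK is returned.
        if (group.acquire_buffer(&buf, true /* nonBlocking */) == WOULD_BLOCK) {
            return;  // try again later, e.g. after signalBufferReturned() fires
        }

        // Blocking (default) would instead be: group.acquire_buffer(&buf);

        // buf now has a reference count of 1; release it when done.
        buf->release();
    }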
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 3f7508b..26a0963 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -30,6 +30,7 @@
 struct AString;
 struct CodecBase;
 struct ICrypto;
+struct IBatteryStats;
 struct SoftwareRenderer;
 struct Surface;
 
@@ -51,6 +52,8 @@
         CB_OUTPUT_FORMAT_CHANGED = 4,
     };
 
+    struct BatteryNotifier;
+
     static sp<MediaCodec> CreateByType(
             const sp<ALooper> &looper, const char *mime, bool encoder);
 
@@ -225,6 +228,9 @@
     sp<AMessage> mInputFormat;
     sp<AMessage> mCallback;
 
+    bool mBatteryStatNotified;
+    bool mIsVideo;
+
     // initial create parameters
     AString mInitName;
     bool mInitNameIsType;
@@ -294,6 +300,7 @@
     status_t onSetParameters(const sp<AMessage> &params);
 
     status_t amendOutputFormatWithCodecSpecificData(const sp<ABuffer> &buffer);
+    void updateBatteryStat();
 
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodec);
 };
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index 01a5daf..c11fcc9 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -25,9 +25,12 @@
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/Vector.h>
+#include <utils/StrongPointer.h>
 
 namespace android {
 
+struct AMessage;
+
 struct MediaCodecList {
     static const MediaCodecList *getInstance();
 
@@ -51,15 +54,19 @@
             size_t index, const char *type,
             Vector<ProfileLevel> *profileLevels,
             Vector<uint32_t> *colorFormats,
-            uint32_t *flags) const;
+            uint32_t *flags,
+            // TODO default argument is only for compatibility with existing JNI
+            sp<AMessage> *capabilities = NULL) const;
 
 private:
     enum Section {
         SECTION_TOPLEVEL,
         SECTION_DECODERS,
         SECTION_DECODER,
+        SECTION_DECODER_TYPE,
         SECTION_ENCODERS,
         SECTION_ENCODER,
+        SECTION_ENCODER_TYPE,
         SECTION_INCLUDE,
     };
 
@@ -67,7 +74,10 @@
         AString mName;
         bool mIsEncoder;
         uint32_t mTypes;
+        uint32_t mSoleType;
         uint32_t mQuirks;
+        KeyedVector<uint32_t, sp<AMessage> > mCaps;
+        sp<AMessage> mCurrentCaps;
     };
 
     static MediaCodecList *sCodecList;
@@ -103,6 +113,8 @@
 
     status_t addQuirk(const char **attrs);
     status_t addTypeFromAttributes(const char **attrs);
+    status_t addLimit(const char **attrs);
+    status_t addFeature(const char **attrs);
     void addType(const char *name);
 
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodecList);
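
A hedged query sketch for the extended getCodecCapabilities(): the new out-parameter returns the parsed <Limit>/<Feature> data as an AMessage. findCodecByType() is the existing lookup API and is not part of this change; its use here is an assumption.

    #include <media/stagefright/MediaCodecList.h>
    #include <media/stagefright/foundation/AMessage.h>

    void dumpAvcDecoderCaps() {
        const MediaCodecList *list = MediaCodecList::getInstance();
        ssize_t index = list->findCodecByType("video/avc", false /* encoder */);
        if (index < 0) {
            return;
        }
        Vector<MediaCodecList::ProfileLevel> profileLevels;
        Vector<uint32_t> colorFormats;
        uint32_t flags;
        sp<AMessage> caps;  // new out-parameter with structured limits/features
        if (list->getCodecCapabilities(index, "video/avc", &profileLevels,
                &colorFormats, &flags, &caps) == OK && caps != NULL) {
            // caps->debugString() now shows the parsed limits and features
        }
    }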
diff --git a/include/media/stagefright/MediaSource.h b/include/media/stagefright/MediaSource.h
index 204d1c6..a653db9 100644
--- a/include/media/stagefright/MediaSource.h
+++ b/include/media/stagefright/MediaSource.h
@@ -82,6 +82,10 @@
         void setLateBy(int64_t lateness_us);
         int64_t getLateBy() const;
 
+        void setNonBlocking();
+        void clearNonBlocking();
+        bool getNonBlocking() const;
+
     private:
         enum Options {
             kSeekTo_Option      = 1,
@@ -91,6 +95,7 @@
         int64_t mSeekTimeUs;
         SeekMode mSeekMode;
         int64_t mLatenessUs;
+        bool mNonBlocking;
     };
 
     // Causes this source to suspend pulling data from its upstream source
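
A sketch of the non-blocking read these accessors enable; GenericSource in this change uses the same pattern for Widevine video. The wrapper function is illustrative and `source` stands for any MediaSource.

    #include <media/stagefright/MediaBuffer.h>
    #include <media/stagefright/MediaSource.h>

    status_t pullOnce(const sp<MediaSource> &source) {
        MediaSource::ReadOptions options;
        options.setNonBlocking();              // don't wait if no buffer is ready

        MediaBuffer *mbuf = NULL;
        status_t err = source->read(&mbuf, &options);
        if (err == WOULD_BLOCK) {
            return WOULD_BLOCK;                // nothing available yet; retry later
        }
        if (err == OK) {
            // ... consume mbuf ...
            mbuf->release();
        }
        return err;
    }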
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index 7e823eb..5846d6b 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -50,6 +50,7 @@
     void setDouble(const char *name, double value);
     void setPointer(const char *name, void *value);
     void setString(const char *name, const char *s, ssize_t len = -1);
+    void setString(const char *name, const AString &s);
     void setObject(const char *name, const sp<RefBase> &obj);
     void setBuffer(const char *name, const sp<ABuffer> &buffer);
     void setMessage(const char *name, const sp<AMessage> &obj);
@@ -58,6 +59,8 @@
             const char *name,
             int32_t left, int32_t top, int32_t right, int32_t bottom);
 
+    bool contains(const char *name) const;
+
     bool findInt32(const char *name, int32_t *value) const;
     bool findInt64(const char *name, int64_t *value) const;
     bool findSize(const char *name, size_t *value) const;
diff --git a/include/media/stagefright/foundation/AString.h b/include/media/stagefright/foundation/AString.h
index 0edaa1c..4be3c6d 100644
--- a/include/media/stagefright/foundation/AString.h
+++ b/include/media/stagefright/foundation/AString.h
@@ -70,6 +70,9 @@
     size_t hash() const;
 
     bool operator==(const AString &other) const;
+    bool operator!=(const AString &other) const {
+        return !operator==(other);
+    }
     bool operator<(const AString &other) const;
     bool operator>(const AString &other) const;
 
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 3ee5809..80c8c5e 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -211,7 +211,7 @@
     mReqFrameCount = frameCount;
 
     mNotificationFramesReq = notificationFrames;
-    mNotificationFramesAct = 0;
+    // mNotificationFramesAct is initialized in openRecord_l
 
     if (sessionId == AUDIO_SESSION_ALLOCATE) {
         mSessionId = AudioSystem::newAudioSessionId();
@@ -444,60 +444,25 @@
         }
     }
 
-    // FIXME Assume double buffering, because we don't know the true HAL sample rate
-    const uint32_t nBuffering = 2;
-
-    mNotificationFramesAct = mNotificationFramesReq;
-    size_t frameCount = mReqFrameCount;
-
-    if (!(mFlags & AUDIO_INPUT_FLAG_FAST)) {
-        // validate framecount
-        // If fast track was not requested, this preserves
-        // the old behavior of validating on client side.
-        // FIXME Eventually the validation should be done on server side
-        // regardless of whether it's a fast or normal track.  It's debatable
-        // whether to account for the input latency to provision buffers appropriately.
-        size_t minFrameCount;
-        status = AudioRecord::getMinFrameCount(&minFrameCount,
-                mSampleRate, mFormat, mChannelMask);
-        if (status != NO_ERROR) {
-            ALOGE("getMinFrameCount() failed for sampleRate %u, format %#x, channelMask %#x; "
-                    "status %d",
-                    mSampleRate, mFormat, mChannelMask, status);
-            return status;
-        }
-
-        if (frameCount == 0) {
-            frameCount = minFrameCount;
-        } else if (frameCount < minFrameCount) {
-            ALOGE("frameCount %zu < minFrameCount %zu", frameCount, minFrameCount);
-            return BAD_VALUE;
-        }
-
-        // Make sure that application is notified with sufficient margin before overrun
-        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
-            mNotificationFramesAct = frameCount/2;
-        }
-    }
-
     audio_io_handle_t input = AudioSystem::getInput(mInputSource, mSampleRate, mFormat,
-            mChannelMask, mSessionId);
+            mChannelMask, mSessionId, mFlags);
     if (input == AUDIO_IO_HANDLE_NONE) {
         ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
-              "channel mask %#x, session %d",
-              mInputSource, mSampleRate, mFormat, mChannelMask, mSessionId);
+              "channel mask %#x, session %d, flags %#x",
+              mInputSource, mSampleRate, mFormat, mChannelMask, mSessionId, mFlags);
         return BAD_VALUE;
     }
     {
     // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
     // we must release it ourselves if anything goes wrong.
 
+    size_t frameCount = mReqFrameCount;
     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                 // but we will still need the original value also
     int originalSessionId = mSessionId;
 
     // The notification frame count is the period between callbacks, as suggested by the server.
-    size_t notificationFrames;
+    size_t notificationFrames = mNotificationFramesReq;
 
     sp<IMemory> iMem;           // for cblk
     sp<IMemory> bufferMem;
@@ -576,14 +541,14 @@
             // once denied, do not request again if IAudioRecord is re-created
             mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
         }
-        // Theoretically double-buffering is not required for fast tracks,
-        // due to tighter scheduling.  But in practice, to accomodate kernels with
-        // scheduling jitter, and apps with computation jitter, we use double-buffering.
-        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
-            mNotificationFramesAct = frameCount/nBuffering;
-        }
     }
 
+    // Make sure that application is notified with sufficient margin before overrun
+    if (notificationFrames == 0 || notificationFrames > frameCount) {
+        ALOGW("Received notificationFrames %zu for frameCount %zu", notificationFrames, frameCount);
+    }
+    mNotificationFramesAct = notificationFrames;
+
     // We retain a copy of the I/O handle, but don't own the reference
     mInput = input;
     mRefreshRemaining = true;
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index a47d45c..fd5824b 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -688,11 +688,12 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int sessionId)
+                                    int sessionId,
+                                    audio_input_flags_t flags)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return 0;
-    return aps->getInput(inputSource, samplingRate, format, channelMask, sessionId);
+    return aps->getInput(inputSource, samplingRate, format, channelMask, sessionId, flags);
 }
 
 status_t AudioSystem::startInput(audio_io_handle_t input)
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 5cf42f7..bd7ea46 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -197,6 +197,7 @@
             lSessionId = *sessionId;
         }
         data.writeInt32(lSessionId);
+        data.writeInt64(notificationFrames != NULL ? *notificationFrames : 0);
         cblk.clear();
         buffers.clear();
         status_t lStatus = remote()->transact(OPEN_RECORD, data, &reply);
@@ -532,7 +533,8 @@
                                         audio_devices_t *pDevices,
                                         uint32_t *pSamplingRate,
                                         audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask)
+                                        audio_channel_mask_t *pChannelMask,
+                                        audio_input_flags_t flags)
     {
         Parcel data, reply;
         audio_devices_t devices = pDevices != NULL ? *pDevices : AUDIO_DEVICE_NONE;
@@ -547,6 +549,7 @@
         data.writeInt32(samplingRate);
         data.writeInt32(format);
         data.writeInt32(channelMask);
+        data.writeInt32(flags);
         remote()->transact(OPEN_INPUT, data, &reply);
         audio_io_handle_t input = (audio_io_handle_t) reply.readInt32();
         devices = (audio_devices_t)reply.readInt32();
@@ -964,7 +967,7 @@
             track_flags_t flags = (track_flags_t) data.readInt32();
             pid_t tid = (pid_t) data.readInt32();
             int sessionId = data.readInt32();
-            size_t notificationFrames = 0;
+            size_t notificationFrames = data.readInt64();
             sp<IMemory> cblk;
             sp<IMemory> buffers;
             status_t status;
@@ -1157,12 +1160,14 @@
             uint32_t samplingRate = data.readInt32();
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = (audio_channel_mask_t)data.readInt32();
+            audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
 
             audio_io_handle_t input = openInput(module,
                                              &devices,
                                              &samplingRate,
                                              &format,
-                                             &channelMask);
+                                             &channelMask,
+                                             flags);
             reply->writeInt32((int32_t) input);
             reply->writeInt32(devices);
             reply->writeInt32(samplingRate);
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 41a9065..40dfb58 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -225,7 +225,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int audioSession)
+                                    int audioSession,
+                                    audio_input_flags_t flags)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -234,6 +235,7 @@
         data.writeInt32(static_cast <uint32_t>(format));
         data.writeInt32(channelMask);
         data.writeInt32(audioSession);
+        data.writeInt32(flags);
         remote()->transact(GET_INPUT, data, &reply);
         return static_cast <audio_io_handle_t> (reply.readInt32());
     }
@@ -707,11 +709,13 @@
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
             int audioSession = data.readInt32();
+            audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
             audio_io_handle_t input = getInput(inputSource,
                                                samplingRate,
                                                format,
                                                channelMask,
-                                               audioSession);
+                                               audioSession,
+                                               flags);
             reply->writeInt32(static_cast <int>(input));
             return NO_ERROR;
         } break;
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index 2aa0592..d2e381b 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -28,6 +28,7 @@
 #include <media/mediaplayer.h>
 #include <media/SoundPool.h>
 #include "SoundPoolThread.h"
+#include <media/AudioPolicyHelper.h>
 
 namespace android
 {
@@ -39,10 +40,10 @@
 size_t kDefaultHeapSize = 1024 * 1024; // 1MB
 
 
-SoundPool::SoundPool(int maxChannels, audio_stream_type_t streamType, int srcQuality)
+SoundPool::SoundPool(int maxChannels, const audio_attributes_t* pAttributes)
 {
-    ALOGV("SoundPool constructor: maxChannels=%d, streamType=%d, srcQuality=%d",
-            maxChannels, streamType, srcQuality);
+    ALOGV("SoundPool constructor: maxChannels=%d, attr.usage=%d, attr.flags=0x%x, attr.tags=%s",
+            maxChannels, pAttributes->usage, pAttributes->flags, pAttributes->tags);
 
     // check limits
     mMaxChannels = maxChannels;
@@ -56,8 +57,7 @@
 
     mQuit = false;
     mDecodeThread = 0;
-    mStreamType = streamType;
-    mSrcQuality = srcQuality;
+    memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
     mAllocated = 0;
     mNextSampleID = 0;
     mNextChannelID = 0;
@@ -580,7 +580,7 @@
         // initialize track
         size_t afFrameCount;
         uint32_t afSampleRate;
-        audio_stream_type_t streamType = mSoundPool->streamType();
+        audio_stream_type_t streamType = audio_attributes_to_stream_type(mSoundPool->attributes());
         if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
             afFrameCount = kDefaultFrameCount;
         }
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index cc0cb01..d75408d 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -28,6 +28,7 @@
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
+#include "../../libstagefright/include/WVMExtractor.h"
 
 namespace android {
 
@@ -35,10 +36,16 @@
         const sp<AMessage> &notify,
         const sp<IMediaHTTPService> &httpService,
         const char *url,
-        const KeyedVector<String8, String8> *headers)
+        const KeyedVector<String8, String8> *headers,
+        bool isWidevine,
+        bool uidValid,
+        uid_t uid)
     : Source(notify),
       mDurationUs(0ll),
-      mAudioIsVorbis(false) {
+      mAudioIsVorbis(false),
+      mIsWidevine(isWidevine),
+      mUIDValid(uidValid),
+      mUID(uid) {
     DataSource::RegisterDefaultSniffers();
 
     sp<DataSource> dataSource =
@@ -63,7 +70,31 @@
 
 void NuPlayer::GenericSource::initFromDataSource(
         const sp<DataSource> &dataSource) {
-    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
+    sp<MediaExtractor> extractor;
+
+    if (mIsWidevine) {
+        String8 mimeType;
+        float confidence;
+        sp<AMessage> dummy;
+        bool success;
+
+        success = SniffWVM(dataSource, &mimeType, &confidence, &dummy);
+        if (!success
+                || strcasecmp(
+                    mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
+            ALOGE("unsupported widevine mime: %s", mimeType.string());
+            return;
+        }
+
+        sp<WVMExtractor> wvmExtractor = new WVMExtractor(dataSource);
+        wvmExtractor->setAdaptiveStreamingMode(true);
+        if (mUIDValid) {
+            wvmExtractor->setUID(mUID);
+        }
+        extractor = wvmExtractor;
+    } else {
+        extractor = MediaExtractor::Create(dataSource);
+    }
 
     CHECK(extractor != NULL);
 
@@ -113,6 +144,13 @@
     }
 }
 
+status_t NuPlayer::GenericSource::setBuffers(bool audio, Vector<MediaBuffer *> &buffers) {
+    if (mIsWidevine && !audio) {
+        return mVideoTrack.mSource->setBuffers(buffers);
+    }
+    return INVALID_OPERATION;
+}
+
 NuPlayer::GenericSource::~GenericSource() {
 }
 
@@ -128,7 +166,8 @@
     }
 
     notifyFlagsChanged(
-            FLAG_CAN_PAUSE
+            (mIsWidevine ? FLAG_SECURE : 0)
+            | FLAG_CAN_PAUSE
             | FLAG_CAN_SEEK_BACKWARD
             | FLAG_CAN_SEEK_FORWARD
             | FLAG_CAN_SEEK);
@@ -180,9 +219,14 @@
         return -EWOULDBLOCK;
     }
 
+    if (mIsWidevine && !audio) {
+        // try to read a buffer as we may not have been able to the last time
+        readBuffer(audio, -1ll);
+    }
+
     status_t finalResult;
     if (!track->mPackets->hasBufferAvailable(&finalResult)) {
-        return finalResult == OK ? -EWOULDBLOCK : finalResult;
+        return (finalResult == OK ? -EWOULDBLOCK : finalResult);
     }
 
     status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
@@ -280,6 +324,10 @@
         seeking = true;
     }
 
+    if (mIsWidevine && !audio) {
+        options.setNonBlocking();
+    }
+
     for (;;) {
         MediaBuffer *mbuf;
         status_t err = track->mSource->read(&mbuf, &options);
@@ -293,11 +341,18 @@
                 outLength += sizeof(int32_t);
             }
 
-            sp<ABuffer> buffer = new ABuffer(outLength);
-
-            memcpy(buffer->data(),
-                   (const uint8_t *)mbuf->data() + mbuf->range_offset(),
-                   mbuf->range_length());
+            sp<ABuffer> buffer;
+            if (mIsWidevine && !audio) {
+                // data is already provided in the buffer
+                buffer = new ABuffer(NULL, mbuf->range_length());
+                buffer->meta()->setPointer("mediaBuffer", mbuf);
+                mbuf->add_ref();
+            } else {
+                buffer = new ABuffer(outLength);
+                memcpy(buffer->data(),
+                       (const uint8_t *)mbuf->data() + mbuf->range_offset(),
+                       mbuf->range_length());
+            }
 
             if (audio && mAudioIsVorbis) {
                 int32_t numPageSamples;
@@ -332,6 +387,8 @@
 
             track->mPackets->queueAccessUnit(buffer);
             break;
+        } else if (err == WOULD_BLOCK) {
+            break;
         } else if (err == INFO_FORMAT_CHANGED) {
 #if 0
             track->mPackets->queueDiscontinuity(
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index e0cd20f..8e0209d 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -35,7 +35,10 @@
             const sp<AMessage> &notify,
             const sp<IMediaHTTPService> &httpService,
             const char *url,
-            const KeyedVector<String8, String8> *headers);
+            const KeyedVector<String8, String8> *headers,
+            bool isWidevine = false,
+            bool uidValid = false,
+            uid_t uid = 0);
 
     GenericSource(
             const sp<AMessage> &notify,
@@ -54,6 +57,8 @@
     virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
     virtual status_t seekTo(int64_t seekTimeUs);
 
+    virtual status_t setBuffers(bool audio, Vector<MediaBuffer *> &buffers);
+
 protected:
     virtual ~GenericSource();
 
@@ -73,6 +78,9 @@
 
     int64_t mDurationUs;
     bool mAudioIsVorbis;
+    bool mIsWidevine;
+    bool mUIDValid;
+    uid_t mUID;
 
     void initFromDataSource(const sp<DataSource> &dataSource);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 88c59bf..fa6b1e5 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -36,6 +36,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
@@ -221,6 +222,10 @@
                     || strstr(url, ".sdp?"))) {
         source = new RTSPSource(
                 notify, httpService, url, headers, mUIDValid, mUID, true);
+    } else if ((!strncasecmp(url, "widevine://", 11))) {
+        source = new GenericSource(notify, httpService, url, headers,
+                true /* isWidevine */, mUIDValid, mUID);
+        mSourceFlags |= Source::FLAG_SECURE;
     } else {
         source = new GenericSource(notify, httpService, url, headers);
     }
@@ -512,6 +517,17 @@
             mNumFramesDropped = 0;
             mStarted = true;
 
+            /* instantiate decoders now for secure playback */
+            if (mSourceFlags & Source::FLAG_SECURE) {
+                if (mNativeWindow != NULL) {
+                    instantiateDecoder(false, &mVideoDecoder);
+                }
+
+                if (mAudioSink != NULL) {
+                    instantiateDecoder(true, &mAudioDecoder);
+                }
+            }
+
             mSource->start();
 
             uint32_t flags = 0;
@@ -540,7 +556,10 @@
                     new AMessage(kWhatRendererNotify, id()),
                     flags);
 
-            looper()->registerHandler(mRenderer);
+            mRendererLooper = new ALooper;
+            mRendererLooper->setName("NuPlayerRenderer");
+            mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+            mRendererLooper->registerHandler(mRenderer);
 
             postScanSources();
             break;
@@ -735,6 +754,7 @@
                             offloadInfo.has_video = (mVideoDecoder != NULL);
                             offloadInfo.is_streaming = true;
 
+                            ALOGV("try to open AudioSink in offload mode");
                             err = mAudioSink->open(
                                     sampleRate,
                                     numChannels,
@@ -774,6 +794,7 @@
 
                     if (!mOffloadAudio) {
                         flags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+                        ALOGV("open AudioSink in NON-offload mode");
                         CHECK_EQ(mAudioSink->open(
                                     sampleRate,
                                     numChannels,
@@ -921,6 +942,21 @@
             } else if (what == Renderer::kWhatMediaRenderingStart) {
                 ALOGV("media rendering started");
                 notifyListener(MEDIA_STARTED, 0, 0);
+            } else if (what == Renderer::kWhatAudioOffloadTearDown) {
+                ALOGV("Tear down audio offload, fall back to s/w path");
+                int64_t positionUs;
+                CHECK(msg->findInt64("positionUs", &positionUs));
+                mAudioSink->close();
+                mAudioDecoder.clear();
+                mRenderer->flush(true /* audio */);
+                if (mVideoDecoder != NULL) {
+                    mRenderer->flush(false /* audio */);
+                }
+                mRenderer->signalDisableOffloadAudio();
+                mOffloadAudio = false;
+
+                performSeek(positionUs);
+                instantiateDecoder(true /* audio */, &mAudioDecoder);
             }
             break;
         }
@@ -1055,6 +1091,10 @@
 
         sp<AMessage> ccNotify = new AMessage(kWhatClosedCaptionNotify, id());
         mCCDecoder = new CCDecoder(ccNotify);
+
+        if (mSourceFlags & Source::FLAG_SECURE) {
+            format->setInt32("secure", true);
+        }
     }
 
     sp<AMessage> notify =
@@ -1073,6 +1113,28 @@
     (*decoder)->init();
     (*decoder)->configure(format);
 
+    // allocate buffers to decrypt widevine source buffers
+    if (!audio && (mSourceFlags & Source::FLAG_SECURE)) {
+        Vector<sp<ABuffer> > inputBufs;
+        CHECK_EQ((*decoder)->getInputBuffers(&inputBufs), (status_t)OK);
+
+        Vector<MediaBuffer *> mediaBufs;
+        for (size_t i = 0; i < inputBufs.size(); i++) {
+            const sp<ABuffer> &buffer = inputBufs[i];
+            MediaBuffer *mbuf = new MediaBuffer(buffer->data(), buffer->size());
+            mediaBufs.push(mbuf);
+        }
+
+        status_t err = mSource->setBuffers(audio, mediaBufs);
+        if (err != OK) {
+            for (size_t i = 0; i < mediaBufs.size(); ++i) {
+                mediaBufs[i]->release();
+            }
+            mediaBufs.clear();
+            ALOGE("Secure source didn't support secure mediaBufs.");
+            return err;
+        }
+    }
     return OK;
 }
 
@@ -1184,6 +1246,7 @@
 
         dropAccessUnit = false;
         if (!audio
+                && !(mSourceFlags & Source::FLAG_SECURE)
                 && mVideoLateByUs > 100000ll
                 && mVideoIsAVC
                 && !IsAVCReferenceFrame(accessUnit)) {
@@ -1497,6 +1560,13 @@
     ++mScanSourcesGeneration;
     mScanSourcesPending = false;
 
+    if (mRendererLooper != NULL) {
+        if (mRenderer != NULL) {
+            mRendererLooper->unregisterHandler(mRenderer->id());
+        }
+        mRendererLooper->stop();
+        mRendererLooper.clear();
+    }
     mRenderer.clear();
 
     if (mSource != NULL) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index d7c00aa..c04e277 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -125,6 +125,7 @@
     sp<Decoder> mAudioDecoder;
     sp<CCDecoder> mCCDecoder;
     sp<Renderer> mRenderer;
+    sp<ALooper> mRendererLooper;
 
     List<sp<Action> > mDeferredActions;
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index dd73cc4..1b9bafb 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -26,6 +26,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
@@ -54,6 +55,22 @@
 NuPlayer::Decoder::~Decoder() {
 }
 
+static
+status_t PostAndAwaitResponse(
+        const sp<AMessage> &msg, sp<AMessage> *response) {
+    status_t err = msg->postAndAwaitResponse(response);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (!(*response)->findInt32("err", &err)) {
+        err = OK;
+    }
+
+    return err;
+}
+
 void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) {
     CHECK(mCodec == NULL);
 
@@ -72,8 +89,20 @@
     ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), surface.get());
 
     mCodec = MediaCodec::CreateByType(mCodecLooper, mime.c_str(), false /* encoder */);
+    int32_t secure = 0;
+    if (format->findInt32("secure", &secure) && secure != 0) {
+        if (mCodec != NULL) {
+            mCodec->getName(&mComponentName);
+            mComponentName.append(".secure");
+            mCodec->release();
+            ALOGI("[%s] creating", mComponentName.c_str());
+            mCodec = MediaCodec::CreateByComponentName(
+                    mCodecLooper, mComponentName.c_str());
+        }
+    }
     if (mCodec == NULL) {
-        ALOGE("Failed to create %s decoder", mime.c_str());
+        ALOGE("Failed to create %s%s decoder",
+                (secure ? "secure " : ""), mime.c_str());
         handleError(UNKNOWN_ERROR);
         return;
     }
@@ -107,6 +136,7 @@
 
     // the following should work after start
     CHECK_EQ((status_t)OK, mCodec->getInputBuffers(&mInputBuffers));
+    releaseAndResetMediaBuffers();
     CHECK_EQ((status_t)OK, mCodec->getOutputBuffers(&mOutputBuffers));
     ALOGV("[%s] got %zu input and %zu output buffers",
             mComponentName.c_str(),
@@ -117,6 +147,18 @@
     mPaused = false;
 }
 
+void NuPlayer::Decoder::releaseAndResetMediaBuffers() {
+    for (size_t i = 0; i < mMediaBuffers.size(); i++) {
+        if (mMediaBuffers[i] != NULL) {
+            mMediaBuffers[i]->release();
+            mMediaBuffers.editItemAt(i) = NULL;
+        }
+    }
+    mMediaBuffers.resize(mInputBuffers.size());
+    mInputBufferIsDequeued.clear();
+    mInputBufferIsDequeued.resize(mInputBuffers.size());
+}
+
 void NuPlayer::Decoder::requestCodecNotification() {
     if (mCodec != NULL) {
         sp<AMessage> reply = new AMessage(kWhatCodecNotify, id());
@@ -141,6 +183,14 @@
     msg->post();
 }
 
+status_t NuPlayer::Decoder::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
+    sp<AMessage> msg = new AMessage(kWhatGetInputBuffers, id());
+    msg->setPointer("buffers", buffers);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
 void NuPlayer::Decoder::handleError(int32_t err)
 {
     sp<AMessage> notify = mNotify->dup();
@@ -163,6 +213,12 @@
 
     CHECK_LT(bufferIx, mInputBuffers.size());
 
+    if (mMediaBuffers[bufferIx] != NULL) {
+        mMediaBuffers[bufferIx]->release();
+        mMediaBuffers.editItemAt(bufferIx) = NULL;
+    }
+    mInputBufferIsDequeued.editItemAt(bufferIx) = true;
+
     sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, id());
     reply->setSize("buffer-ix", bufferIx);
     reply->setInt32("generation", mBufferGeneration);
@@ -183,6 +239,44 @@
 
     sp<ABuffer> buffer;
     bool hasBuffer = msg->findBuffer("buffer", &buffer);
+
+    // handle widevine classic source, which fills an arbitrary input buffer
+    MediaBuffer *mediaBuffer = NULL;
+    if (hasBuffer && buffer->meta()->findPointer(
+            "mediaBuffer", (void **)&mediaBuffer)) {
+        if (mediaBuffer == NULL) {
+            // received no actual buffer
+            ALOGW("[%s] received null MediaBuffer %s",
+                    mComponentName.c_str(), msg->debugString().c_str());
+            buffer = NULL;
+        } else {
+            // likely filled a different buffer than the one we requested: adjust buffer index
+            size_t ix;
+            for (ix = 0; ix < mInputBuffers.size(); ix++) {
+                const sp<ABuffer> &buf = mInputBuffers[ix];
+                if (buf->data() == mediaBuffer->data()) {
+                    // all input buffers are dequeued on start, hence the check
+                    CHECK(mInputBufferIsDequeued[ix]);
+                    ALOGV("[%s] received MediaBuffer for #%zu instead of #%zu",
+                            mComponentName.c_str(), ix, bufferIx);
+
+                    // TRICKY: need buffer for the metadata, so instead, set
+                    // codecBuffer to the same (though incorrect) buffer to
+                    // avoid a memcpy into the codecBuffer
+                    codecBuffer = buffer;
+                    codecBuffer->setRange(
+                            mediaBuffer->range_offset(),
+                            mediaBuffer->range_length());
+                    bufferIx = ix;
+                    break;
+                }
+            }
+            CHECK(ix < mInputBuffers.size());
+        }
+    }
+
+    mInputBufferIsDequeued.editItemAt(bufferIx) = false;
+
     if (buffer == NULL /* includes !hasBuffer */) {
         int32_t streamErr = ERROR_END_OF_STREAM;
         CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
@@ -236,6 +330,11 @@
                     mComponentName.c_str(), err);
             handleError(err);
         }
+
+        if (mediaBuffer != NULL) {
+            CHECK(mMediaBuffers[bufferIx] == NULL);
+            mMediaBuffers.editItemAt(bufferIx) = mediaBuffer;
+        }
     }
 }
 
@@ -352,6 +451,8 @@
         return;
     }
 
+    releaseAndResetMediaBuffers();
+
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatFlushCompleted);
     notify->post();
@@ -379,6 +480,8 @@
         mComponentName = "decoder";
     }
 
+    releaseAndResetMediaBuffers();
+
     if (err != OK) {
         ALOGE("failed to release %s (err=%d)", mComponentName.c_str(), err);
         handleError(err);
@@ -403,6 +506,23 @@
             break;
         }
 
+        case kWhatGetInputBuffers:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            Vector<sp<ABuffer> > *dstBuffers;
+            CHECK(msg->findPointer("buffers", (void **)&dstBuffers));
+
+            dstBuffers->clear();
+            for (size_t i = 0; i < mInputBuffers.size(); i++) {
+                dstBuffers->push(mInputBuffers[i]);
+            }
+
+            (new AMessage)->postReply(replyID);
+            break;
+        }
+
         case kWhatCodecNotify:
         {
             if (!isStaleReply(msg)) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 4fa0dbd..c6fc237 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -26,6 +26,7 @@
 
 struct ABuffer;
 struct MediaCodec;
+struct MediaBuffer;
 
 struct NuPlayer::Decoder : public AHandler {
     Decoder(const sp<AMessage> &notify,
@@ -34,6 +35,7 @@
     virtual void configure(const sp<AMessage> &format);
     virtual void init();
 
+    status_t getInputBuffers(Vector<sp<ABuffer> > *dstBuffers) const;
     virtual void signalFlush();
     virtual void signalResume();
     virtual void initiateShutdown();
@@ -60,6 +62,7 @@
     enum {
         kWhatCodecNotify        = 'cdcN',
         kWhatConfigure          = 'conf',
+        kWhatGetInputBuffers    = 'gInB',
         kWhatInputBufferFilled  = 'inpF',
         kWhatRenderBuffer       = 'rndr',
         kWhatFlush              = 'flus',
@@ -77,11 +80,14 @@
 
     Vector<sp<ABuffer> > mInputBuffers;
     Vector<sp<ABuffer> > mOutputBuffers;
+    Vector<bool> mInputBufferIsDequeued;
+    Vector<MediaBuffer *> mMediaBuffers;
 
     void handleError(int32_t err);
     bool handleAnInputBuffer();
     bool handleAnOutputBuffer();
 
+    void releaseAndResetMediaBuffers();
     void requestCodecNotification();
     bool isStaleReply(const sp<AMessage> &msg);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index f520ff7..3640038 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -26,6 +26,8 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 
+#include <inttypes.h>
+
 namespace android {
 
 // static
@@ -221,6 +223,12 @@
             break;
         }
 
+        case kWhatAudioOffloadTearDown:
+        {
+            onAudioOffloadTearDown();
+            break;
+        }
+
         default:
             TRESPASS();
             break;
@@ -292,7 +300,7 @@
 
         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
         {
-            // TODO: send this to player.
+            me->notifyAudioOffloadTearDown();
             break;
         }
     }
@@ -502,6 +510,7 @@
         }
     }
 
+    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
     msg->post(delayUs);
 
     mDrainVideoQueuePending = true;
@@ -579,6 +588,10 @@
     notify->post();
 }
 
+void NuPlayer::Renderer::notifyAudioOffloadTearDown() {
+    (new AMessage(kWhatAudioOffloadTearDown, id()))->post();
+}
+
 void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
     int32_t audio;
     CHECK(msg->findInt32("audio", &audio));
@@ -811,6 +824,7 @@
 void NuPlayer::Renderer::onDisableOffloadAudio() {
     Mutex::Autolock autoLock(mLock);
     mFlags &= ~FLAG_OFFLOAD_AUDIO;
+    ++mAudioQueueGeneration;
 }
 
 void NuPlayer::Renderer::notifyPosition() {
@@ -877,5 +891,21 @@
     }
 }
 
+void NuPlayer::Renderer::onAudioOffloadTearDown() {
+    uint32_t numFramesPlayed;
+    CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
+
+    int64_t currentPositionUs = mFirstAudioTimeUs
+            + (numFramesPlayed * mAudioSink->msecsPerFrame()) * 1000ll;
+
+    mAudioSink->stop();
+    mAudioSink->flush();
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatAudioOffloadTearDown);
+    notify->setInt64("positionUs", currentPositionUs);
+    notify->post();
+}
+
 }  // namespace android
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 6e86a8f..1cba1a0 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -62,6 +62,7 @@
         kWhatPosition            = 'posi',
         kWhatVideoRenderingStart = 'vdrd',
         kWhatMediaRenderingStart = 'mdrd',
+        kWhatAudioOffloadTearDown = 'aOTD',
     };
 
 protected:
@@ -143,12 +144,14 @@
     void onDisableOffloadAudio();
     void onPause();
     void onResume();
+    void onAudioOffloadTearDown();
 
     void notifyEOS(bool audio, status_t finalResult);
     void notifyFlushComplete(bool audio);
     void notifyPosition();
     void notifyVideoLateBy(int64_t lateByUs);
     void notifyVideoRenderingStart();
+    void notifyAudioOffloadTearDown();
 
     void flushQueue(List<QueueEntry> *queue);
     bool dropBufferWhileFlushing(bool audio, const sp<AMessage> &msg);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 632c4a6..259925f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -21,11 +21,14 @@
 #include "NuPlayer.h"
 
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/Vector.h>
 
 namespace android {
 
 struct ABuffer;
 struct MetaData;
+struct MediaBuffer;
 
 struct NuPlayer::Source : public AHandler {
     enum Flags {
@@ -34,6 +37,7 @@
         FLAG_CAN_SEEK_FORWARD   = 4,  // the "10 sec forward button"
         FLAG_CAN_SEEK           = 8,  // the "seek bar"
         FLAG_DYNAMIC_DURATION   = 16,
+        FLAG_SECURE             = 32,
     };
 
     enum {
@@ -89,6 +93,10 @@
         return INVALID_OPERATION;
     }
 
+    virtual status_t setBuffers(bool /* audio */, Vector<MediaBuffer *> &/* buffers */) {
+        return INVALID_OPERATION;
+    }
+
     virtual bool isRealTime() const {
         return false;
     }
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 9c64d72..6cb1c64 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -3989,6 +3989,8 @@
 
         if (err == OK) {
             break;
+        } else {
+            ALOGW("Allocating component '%s' failed, try next one.", componentName.c_str());
         }
 
         node = NULL;
@@ -4504,11 +4506,14 @@
 
     submitOutputBuffers();
 
-    // Post the first input buffer.
+    // Post all available input buffers
     CHECK_GT(mCodec->mBuffers[kPortIndexInput].size(), 0u);
-    BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(0);
-
-    postFillThisBuffer(info);
+    for (size_t i = 0; i < mCodec->mBuffers[kPortIndexInput].size(); i++) {
+        BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(i);
+        if (info->mStatus == BufferInfo::OWNED_BY_US) {
+            postFillThisBuffer(info);
+        }
+    }
 
     mActive = true;
 }
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 207acc8..19da6ee 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -3665,7 +3665,7 @@
 
         uint32_t sampleIndex;
         status_t err = mSampleTable->findSampleAtTime(
-                seekTimeUs * mTimescale / 1000000,
+                seekTimeUs, 1000000, mTimescale,
                 &sampleIndex, findFlags);
 
         if (mode == ReadOptions::SEEK_CLOSEST) {
diff --git a/media/libstagefright/MediaBufferGroup.cpp b/media/libstagefright/MediaBufferGroup.cpp
index 80aae51..6ac6d4a 100644
--- a/media/libstagefright/MediaBufferGroup.cpp
+++ b/media/libstagefright/MediaBufferGroup.cpp
@@ -55,7 +55,8 @@
     mLastBuffer = buffer;
 }
 
-status_t MediaBufferGroup::acquire_buffer(MediaBuffer **out) {
+status_t MediaBufferGroup::acquire_buffer(
+        MediaBuffer **out, bool nonBlocking) {
     Mutex::Autolock autoLock(mLock);
 
     for (;;) {
@@ -70,6 +71,11 @@
             }
         }
 
+        if (nonBlocking) {
+            *out = NULL;
+            return WOULD_BLOCK;
+        }
+
         // All buffers are in use. Block until one of them is returned to us.
         mCondition.wait(mLock);
     }
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 7a9cb0b..15e062e 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -16,13 +16,13 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MediaCodec"
-#include <utils/Log.h>
 #include <inttypes.h>
 
-#include <media/stagefright/MediaCodec.h>
-
+#include "include/avc_utils.h"
 #include "include/SoftwareRenderer.h"
 
+#include <binder/IBatteryStats.h>
+#include <binder/IServiceManager.h>
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
 #include <media/stagefright/foundation/ABuffer.h>
@@ -32,16 +32,85 @@
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/ACodec.h>
 #include <media/stagefright/BufferProducerWrapper.h>
+#include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/NativeWindowWrapper.h>
-
-#include "include/avc_utils.h"
+#include <private/android_filesystem_config.h>
+#include <utils/Log.h>
+#include <utils/Singleton.h>
 
 namespace android {
 
+struct MediaCodec::BatteryNotifier : public Singleton<BatteryNotifier> {
+    BatteryNotifier();
+
+    void noteStartVideo();
+    void noteStopVideo();
+    void noteStartAudio();
+    void noteStopAudio();
+
+private:
+    int32_t mVideoRefCount;
+    int32_t mAudioRefCount;
+    sp<IBatteryStats> mBatteryStatService;
+};
+
+ANDROID_SINGLETON_STATIC_INSTANCE(MediaCodec::BatteryNotifier)
+
+MediaCodec::BatteryNotifier::BatteryNotifier() :
+    mVideoRefCount(0),
+    mAudioRefCount(0) {
+    // get battery service
+    const sp<IServiceManager> sm(defaultServiceManager());
+    if (sm != NULL) {
+        const String16 name("batterystats");
+        mBatteryStatService = interface_cast<IBatteryStats>(sm->getService(name));
+        if (mBatteryStatService == NULL) {
+            ALOGE("batterystats service unavailable!");
+        }
+    }
+}
+
+void MediaCodec::BatteryNotifier::noteStartVideo() {
+    if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStartVideo(AID_MEDIA);
+    }
+    mVideoRefCount++;
+}
+
+void MediaCodec::BatteryNotifier::noteStopVideo() {
+    if (mVideoRefCount == 0) {
+        ALOGW("BatteryNotifier::noteStop(): video refcount is broken!");
+        return;
+    }
+
+    mVideoRefCount--;
+    if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStopVideo(AID_MEDIA);
+    }
+}
+
+void MediaCodec::BatteryNotifier::noteStartAudio() {
+    if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStartAudio(AID_MEDIA);
+    }
+    mAudioRefCount++;
+}
+
+void MediaCodec::BatteryNotifier::noteStopAudio() {
+    if (mAudioRefCount == 0) {
+        ALOGW("BatteryNotifier::noteStop(): audio refcount is broken!");
+        return;
+    }
+
+    mAudioRefCount--;
+    if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStopAudio(AID_MEDIA);
+    }
+}
 // static
 sp<MediaCodec> MediaCodec::CreateByType(
         const sp<ALooper> &looper, const char *mime, bool encoder) {
@@ -71,6 +140,8 @@
       mReplyID(0),
       mFlags(0),
       mSoftRenderer(NULL),
+      mBatteryStatNotified(false),
+      mIsVideo(false),
       mDequeueInputTimeoutGeneration(0),
       mDequeueInputReplyID(0),
       mDequeueOutputTimeoutGeneration(0),
@@ -756,7 +827,6 @@
                 case CodecBase::kWhatComponentConfigured:
                 {
                     CHECK_EQ(mState, CONFIGURING);
-                    setState(CONFIGURED);
 
                     // reset input surface flag
                     mHaveInputSurface = false;
@@ -764,6 +834,7 @@
                     CHECK(msg->findMessage("input-format", &mInputFormat));
                     CHECK(msg->findMessage("output-format", &mOutputFormat));
 
+                    setState(CONFIGURED);
                     (new AMessage)->postReply(mReplyID);
                     break;
                 }
@@ -1620,6 +1691,8 @@
     mState = newState;
 
     cancelPendingDequeueOperations();
+
+    updateBatteryStat();
 }
 
 void MediaCodec::returnBuffersToCodec() {
@@ -2054,4 +2127,34 @@
     return OK;
 }
 
+void MediaCodec::updateBatteryStat() {
+    if (mState == CONFIGURED && !mBatteryStatNotified) {
+        AString mime;
+        CHECK(mOutputFormat != NULL &&
+                mOutputFormat->findString("mime", &mime));
+
+        mIsVideo = mime.startsWithIgnoreCase("video/");
+
+        BatteryNotifier& notifier(BatteryNotifier::getInstance());
+
+        if (mIsVideo) {
+            notifier.noteStartVideo();
+        } else {
+            notifier.noteStartAudio();
+        }
+
+        mBatteryStatNotified = true;
+    } else if (mState == UNINITIALIZED && mBatteryStatNotified) {
+        BatteryNotifier& notifier(BatteryNotifier::getInstance());
+
+        if (mIsVideo) {
+            notifier.noteStopVideo();
+        } else {
+            notifier.noteStopAudio();
+        }
+
+        mBatteryStatNotified = false;
+    }
+}
+
 }  // namespace android
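
The BatteryNotifier above keeps per-process refcounts so batterystats sees exactly one start/stop per media type no matter how many codecs are active at once. A minimal standalone sketch of that refcounting idea (illustrative class and output only; no binder service involved):

    #include <cstdio>

    // Only the 0 -> 1 and 1 -> 0 transitions are reported downstream, mirroring
    // how noteStartVideo()/noteStopVideo() gate the calls into batterystats.
    class RefCountedNotifier {
    public:
        void start() {
            if (mRefCount++ == 0) {
                printf("noteStart reported\n");   // first active user
            }
        }
        void stop() {
            if (mRefCount == 0) {
                printf("refcount is broken\n");   // mirrors the ALOGW path
                return;
            }
            if (--mRefCount == 0) {
                printf("noteStop reported\n");    // last active user
            }
        }
    private:
        int mRefCount = 0;
    };

    int main() {
        RefCountedNotifier n;
        n.start(); n.start();   // two codecs configured: one report
        n.stop();  n.stop();    // both released: one report
    }
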
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index cd51582..8f54343 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -21,6 +21,7 @@
 #include <media/stagefright/MediaCodecList.h>
 
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/OMXCodec.h>
@@ -79,6 +80,19 @@
                   info->mName.c_str());
 
             mCodecInfos.removeAt(i);
+#if LOG_NDEBUG == 0
+        } else {
+            for (size_t type_ix = 0; type_ix < mTypes.size(); ++type_ix) {
+                uint32_t typeMask = 1ul << mTypes.valueAt(type_ix);
+                if (info->mTypes & typeMask) {
+                    AString mime = mTypes.keyAt(type_ix);
+                    uint32_t bit = mTypes.valueAt(type_ix);
+
+                    ALOGV("%s codec info for %s: %s", info->mName.c_str(), mime.c_str(),
+                            info->mCaps.editValueFor(bit)->debugString().c_str());
+                }
+            }
+#endif
         }
     }
 
@@ -217,6 +231,8 @@
         return;
     }
 
+    bool inType = true;
+
     if (!strcmp(name, "Include")) {
         mInitCheck = includeXMLFile(attrs);
         if (mInitCheck == OK) {
@@ -267,6 +283,26 @@
                 mInitCheck = addQuirk(attrs);
             } else if (!strcmp(name, "Type")) {
                 mInitCheck = addTypeFromAttributes(attrs);
+                mCurrentSection =
+                    (mCurrentSection == SECTION_DECODER
+                            ? SECTION_DECODER_TYPE : SECTION_ENCODER_TYPE);
+            }
+        }
+        inType = false;
+        // fall through
+
+        case SECTION_DECODER_TYPE:
+        case SECTION_ENCODER_TYPE:
+        {
+            CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+            // ignore limits and features specified outside of type
+            bool outside = !inType && info->mSoleType == 0;
+            if (outside && (!strcmp(name, "Limit") || !strcmp(name, "Feature"))) {
+                ALOGW("ignoring %s specified outside of a Type", name);
+            } else if (!strcmp(name, "Limit")) {
+                mInitCheck = addLimit(attrs);
+            } else if (!strcmp(name, "Feature")) {
+                mInitCheck = addFeature(attrs);
             }
             break;
         }
@@ -300,10 +336,27 @@
             break;
         }
 
+        case SECTION_DECODER_TYPE:
+        case SECTION_ENCODER_TYPE:
+        {
+            if (!strcmp(name, "Type")) {
+                mCurrentSection =
+                    (mCurrentSection == SECTION_DECODER_TYPE
+                            ? SECTION_DECODER : SECTION_ENCODER);
+
+                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+                info->mCurrentCaps = NULL;
+            }
+            break;
+        }
+
         case SECTION_DECODER:
         {
             if (!strcmp(name, "MediaCodec")) {
                 mCurrentSection = SECTION_DECODERS;
+
+                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+                info->mCurrentCaps = NULL;
             }
             break;
         }
@@ -312,6 +365,9 @@
         {
             if (!strcmp(name, "MediaCodec")) {
                 mCurrentSection = SECTION_ENCODERS;
+
+                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+                info->mCurrentCaps = NULL;
             }
             break;
         }
@@ -373,11 +429,16 @@
     CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
     info->mName = name;
     info->mIsEncoder = encoder;
+    info->mSoleType = 0;
     info->mTypes = 0;
     info->mQuirks = 0;
+    info->mCurrentCaps = NULL;
 
     if (type != NULL) {
         addType(type);
+        // if type was specified in attributes, we do not allow
+        // subsequent types
+        info->mSoleType = info->mTypes;
     }
 }
 
@@ -427,6 +488,12 @@
 status_t MediaCodecList::addTypeFromAttributes(const char **attrs) {
     const char *name = NULL;
 
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+    if (info->mSoleType != 0) {
+        ALOGE("Codec '%s' already had its type specified", info->mName.c_str());
+        return -EINVAL;
+    }
+
     size_t i = 0;
     while (attrs[i] != NULL) {
         if (!strcmp(attrs[i], "name")) {
@@ -469,6 +536,11 @@
 
     CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
     info->mTypes |= 1ul << bit;
+    if (info->mCaps.indexOfKey(bit) < 0) {
+        AMessage *msg = new AMessage();
+        info->mCaps.add(bit, msg);
+    }
+    info->mCurrentCaps = info->mCaps.editValueFor(bit);
 }
 
 ssize_t MediaCodecList::findCodecByType(
@@ -494,6 +566,216 @@
     return -ENOENT;
 }
 
+static status_t limitFoundMissingAttr(AString name, const char *attr, bool found = true) {
+    ALOGE("limit '%s' with %s'%s' attribute", name.c_str(),
+            (found ? "" : "no "), attr);
+    return -EINVAL;
+}
+
+static status_t limitError(AString name, const char *msg) {
+    ALOGE("limit '%s' %s", name.c_str(), msg);
+    return -EINVAL;
+}
+
+static status_t limitInvalidAttr(AString name, const char *attr, AString value) {
+    ALOGE("limit '%s' with invalid '%s' attribute (%s)", name.c_str(),
+            attr, value.c_str());
+    return -EINVAL;
+}
+
+status_t MediaCodecList::addLimit(const char **attrs) {
+    sp<AMessage> msg = new AMessage();
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (attrs[i + 1] == NULL) {
+            return -EINVAL;
+        }
+
+        // attributes with values
+        if (!strcmp(attrs[i], "name")
+                || !strcmp(attrs[i], "default")
+                || !strcmp(attrs[i], "in")
+                || !strcmp(attrs[i], "max")
+                || !strcmp(attrs[i], "min")
+                || !strcmp(attrs[i], "range")
+                || !strcmp(attrs[i], "ranges")
+                || !strcmp(attrs[i], "scale")
+                || !strcmp(attrs[i], "value")) {
+            msg->setString(attrs[i], attrs[i + 1]);
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+        ++i;
+    }
+
+    AString name;
+    if (!msg->findString("name", &name)) {
+        ALOGE("limit with no 'name' attribute");
+        return -EINVAL;
+    }
+
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+
+    // size, blocks, bitrate, frame-rate, blocks-per-second, aspect-ratio: range
+    // quality: range + default + [scale]
+    // complexity: range + default
+    bool found;
+    if (name == "aspect-ratio" || name == "bitrate" || name == "block-count"
+            || name == "blocks-per-second" || name == "complexity"
+            || name == "frame-rate" || name == "quality" || name == "size") {
+        AString min, max;
+        if (msg->findString("min", &min) && msg->findString("max", &max)) {
+            min.append("-");
+            min.append(max);
+            if (msg->contains("range") || msg->contains("value")) {
+                return limitError(name, "has 'min' and 'max' as well as 'range' or "
+                        "'value' attributes");
+            }
+            msg->setString("range", min);
+        } else if (msg->contains("min") || msg->contains("max")) {
+            return limitError(name, "has only 'min' or 'max' attribute");
+        } else if (msg->findString("value", &max)) {
+            min = max;
+            min.append("-");
+            min.append(max);
+            if (msg->contains("range")) {
+                return limitError(name, "has both 'range' and 'value' attributes");
+            }
+            msg->setString("range", min);
+        }
+
+        AString range, scale = "linear", def, in_;
+        if (!msg->findString("range", &range)) {
+            return limitError(name, "with no 'range', 'value' or 'min'/'max' attributes");
+        }
+
+        if ((name == "quality" || name == "complexity") ^
+                (found = msg->findString("default", &def))) {
+            return limitFoundMissingAttr(name, "default", found);
+        }
+        if (name != "quality" && msg->findString("scale", &scale)) {
+            return limitFoundMissingAttr(name, "scale");
+        }
+        if ((name == "aspect-ratio") ^ (found = msg->findString("in", &in_))) {
+            return limitFoundMissingAttr(name, "in", found);
+        }
+
+        if (name == "aspect-ratio") {
+            if (!(in_ == "pixels") && !(in_ == "blocks")) {
+                return limitInvalidAttr(name, "in", in_);
+            }
+            in_.erase(5, 1); // (pixel|block)-aspect-ratio
+            in_.append("-");
+            in_.append(name);
+            name = in_;
+        }
+        if (name == "quality") {
+            info->mCurrentCaps->setString("quality-scale", scale);
+        }
+        if (name == "quality" || name == "complexity") {
+            AString tag = name;
+            tag.append("-default");
+            info->mCurrentCaps->setString(tag.c_str(), def);
+        }
+        AString tag = name;
+        tag.append("-range");
+        info->mCurrentCaps->setString(tag.c_str(), range);
+    } else {
+        AString max, value, ranges;
+        if (msg->contains("default")) {
+            return limitFoundMissingAttr(name, "default");
+        } else if (msg->contains("in")) {
+            return limitFoundMissingAttr(name, "in");
+        } else if ((name == "channel-count") ^
+                (found = msg->findString("max", &max))) {
+            return limitFoundMissingAttr(name, "max", found);
+        } else if (msg->contains("min")) {
+            return limitFoundMissingAttr(name, "min");
+        } else if (msg->contains("range")) {
+            return limitFoundMissingAttr(name, "range");
+        } else if ((name == "sample-rate") ^
+                (found = msg->findString("ranges", &ranges))) {
+            return limitFoundMissingAttr(name, "ranges", found);
+        } else if (msg->contains("scale")) {
+            return limitFoundMissingAttr(name, "scale");
+        } else if ((name == "alignment" || name == "block-size") ^
+                (found = msg->findString("value", &value))) {
+            return limitFoundMissingAttr(name, "value", found);
+        }
+
+        if (max.size()) {
+            AString tag = "max-";
+            tag.append(name);
+            info->mCurrentCaps->setString(tag.c_str(), max);
+        } else if (value.size()) {
+            info->mCurrentCaps->setString(name.c_str(), value);
+        } else if (ranges.size()) {
+            AString tag = name;
+            tag.append("-ranges");
+            info->mCurrentCaps->setString(tag.c_str(), ranges);
+        } else {
+            ALOGW("Ignoring unrecognized limit '%s'", name.c_str());
+        }
+    }
+    return OK;
+}
+
+static bool parseBoolean(const char *s) {
+    if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
+        return true;
+    }
+    char *end;
+    unsigned long res = strtoul(s, &end, 10);
+    return *s != '\0' && *end == '\0' && res > 0;
+}
+
+status_t MediaCodecList::addFeature(const char **attrs) {
+    size_t i = 0;
+    const char *name = NULL;
+    int32_t optional = -1;
+    int32_t required = -1;
+
+    while (attrs[i] != NULL) {
+        if (attrs[i + 1] == NULL) {
+            return -EINVAL;
+        }
+
+        // attributes with values
+        if (!strcmp(attrs[i], "name")) {
+            name = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "optional") || !strcmp(attrs[i], "required")) {
+            int value = (int)parseBoolean(attrs[i + 1]);
+            if (!strcmp(attrs[i], "optional")) {
+                optional = value;
+            } else {
+                required = value;
+            }
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+        ++i;
+    }
+    if (name == NULL) {
+        ALOGE("feature with no 'name' attribute");
+        return -EINVAL;
+    }
+
+    if (optional == required && optional != -1) {
+        ALOGE("feature '%s' is both/neither optional and required", name);
+        return -EINVAL;
+    }
+
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+    AString tag = "feature-";
+    tag.append(name);
+    info->mCurrentCaps->setInt32(tag.c_str(), (required == 1) || (optional == 0));
+    return OK;
+}
+
 ssize_t MediaCodecList::findCodecByName(const char *name) const {
     for (size_t i = 0; i < mCodecInfos.size(); ++i) {
         const CodecInfo &info = mCodecInfos.itemAt(i);
@@ -571,7 +853,8 @@
         size_t index, const char *type,
         Vector<ProfileLevel> *profileLevels,
         Vector<uint32_t> *colorFormats,
-        uint32_t *flags) const {
+        uint32_t *flags,
+        sp<AMessage> *capabilities) const {
     profileLevels->clear();
     colorFormats->clear();
 
@@ -581,6 +864,13 @@
 
     const CodecInfo &info = mCodecInfos.itemAt(index);
 
+    ssize_t typeIndex = mTypes.indexOfKey(type);
+    if (typeIndex < 0) {
+        return -EINVAL;
+    }
+    // essentially doing valueFor without the CHECK abort
+    typeIndex = mTypes.valueAt(typeIndex);
+
     OMXClient client;
     status_t err = client.connect();
     if (err != OK) {
@@ -611,6 +901,11 @@
 
     *flags = caps.mFlags;
 
+    // TODO this check will be removed once JNI side is merged
+    if (capabilities != NULL) {
+        *capabilities = info.mCaps.valueFor(typeIndex);
+    }
+
     return OK;
 }
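
For reference, addLimit()/addFeature() flatten each XML entry into string/int32 keys on the per-type capability message. A sketch of the key shapes this parser produces (the attribute values are made up, and a plain std::map stands in for the AMessage-backed mCurrentCaps):

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        std::map<std::string, std::string> caps;

        // <Limit name="size" min="96x96" max="1920x1088"/>   ->  "size-range"
        caps["size-range"] = "96x96-1920x1088";

        // <Limit name="channel-count" max="2"/>               ->  "max-channel-count"
        caps["max-channel-count"] = "2";

        // <Limit name="alignment" value="2x2"/>               ->  stored under the plain name
        caps["alignment"] = "2x2";

        // <Feature name="adaptive-playback" required="true"/> ->  "feature-adaptive-playback"
        caps["feature-adaptive-playback"] = "1";

        for (const auto &kv : caps) {
            std::cout << kv.first << " = " << kv.second << "\n";
        }
    }
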
 
diff --git a/media/libstagefright/MediaSource.cpp b/media/libstagefright/MediaSource.cpp
index fd0e79c..576471a 100644
--- a/media/libstagefright/MediaSource.cpp
+++ b/media/libstagefright/MediaSource.cpp
@@ -32,6 +32,19 @@
     mOptions = 0;
     mSeekTimeUs = 0;
     mLatenessUs = 0;
+    mNonBlocking = false;
+}
+
+void MediaSource::ReadOptions::setNonBlocking() {
+    mNonBlocking = true;
+}
+
+void MediaSource::ReadOptions::clearNonBlocking() {
+    mNonBlocking = false;
+}
+
+bool MediaSource::ReadOptions::getNonBlocking() const {
+    return mNonBlocking;
 }
 
 void MediaSource::ReadOptions::setSeekTo(int64_t time_us, SeekMode mode) {
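A small usage sketch of the new non-blocking ReadOptions flag. Whether and how a source honors it (for example by returning -EWOULDBLOCK instead of waiting) depends on the source implementation and is an assumption here, not part of this hunk; the snippet only exercises the three new accessors and is meant to compile against the stagefright headers in the tree:

    #include <media/stagefright/MediaSource.h>

    using namespace android;

    static void nonBlockingExample() {
        MediaSource::ReadOptions options;
        options.setNonBlocking();
        // ... pass &options to MediaSource::read(); a cooperating source would
        // return promptly (e.g. -EWOULDBLOCK, by assumption) when no data is ready ...
        if (options.getNonBlocking()) {
            options.clearNonBlocking();
        }
    }
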
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index 8c15929..821bd81 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -320,22 +320,26 @@
     }
 
     size_t left = 0;
-    size_t right = mTableOfContents.size();
-    while (left < right) {
-        size_t center = left / 2 + right / 2 + (left & right & 1);
+    size_t right_plus_one = mTableOfContents.size();
+    while (left < right_plus_one) {
+        size_t center = left + (right_plus_one - left) / 2;
 
         const TOCEntry &entry = mTableOfContents.itemAt(center);
 
         if (timeUs < entry.mTimeUs) {
-            right = center;
+            right_plus_one = center;
         } else if (timeUs > entry.mTimeUs) {
             left = center + 1;
         } else {
-            left = right = center;
+            left = center;
             break;
         }
     }
 
+    if (left == mTableOfContents.size()) {
+        --left;
+    }
+
     const TOCEntry &entry = mTableOfContents.itemAt(left);
 
     ALOGV("seeking to entry %zu / %zu at offset %lld",
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 9a92805..bad43f2 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -520,83 +520,72 @@
 }
 
 status_t SampleTable::findSampleAtTime(
-        uint32_t req_time, uint32_t *sample_index, uint32_t flags) {
+        uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
+        uint32_t *sample_index, uint32_t flags) {
     buildSampleEntriesTable();
 
     uint32_t left = 0;
-    uint32_t right = mNumSampleSizes;
-    while (left < right) {
-        uint32_t center = (left + right) / 2;
-        uint32_t centerTime = mSampleTimeEntries[center].mCompositionTime;
+    uint32_t right_plus_one = mNumSampleSizes;
+    while (left < right_plus_one) {
+        uint32_t center = left + (right_plus_one - left) / 2;
+        uint64_t centerTime =
+            getSampleTime(center, scale_num, scale_den);
 
         if (req_time < centerTime) {
-            right = center;
+            right_plus_one = center;
         } else if (req_time > centerTime) {
             left = center + 1;
         } else {
-            left = center;
-            break;
+            *sample_index = mSampleTimeEntries[center].mSampleIndex;
+            return OK;
         }
     }
 
-    if (left == mNumSampleSizes) {
-        if (flags == kFlagAfter) {
-            return ERROR_OUT_OF_RANGE;
-        }
-
-        --left;
-    }
-
     uint32_t closestIndex = left;
 
+    if (closestIndex == mNumSampleSizes) {
+        if (flags == kFlagAfter) {
+            return ERROR_OUT_OF_RANGE;
+        }
+        flags = kFlagBefore;
+    } else if (closestIndex == 0) {
+        if (flags == kFlagBefore) {
+            // normally we should return out of range, but that is
+            // treated as end-of-stream.  instead return first sample
+            //
+            // return ERROR_OUT_OF_RANGE;
+        }
+        flags = kFlagAfter;
+    }
+
     switch (flags) {
         case kFlagBefore:
         {
-            while (closestIndex > 0
-                    && mSampleTimeEntries[closestIndex].mCompositionTime
-                            > req_time) {
-                --closestIndex;
-            }
+            --closestIndex;
             break;
         }
 
         case kFlagAfter:
         {
-            while (closestIndex + 1 < mNumSampleSizes
-                    && mSampleTimeEntries[closestIndex].mCompositionTime
-                            < req_time) {
-                ++closestIndex;
-            }
+            // nothing to do
             break;
         }
 
         default:
         {
             CHECK(flags == kFlagClosest);
-
-            if (closestIndex > 0) {
-                // Check left neighbour and pick closest.
-                uint32_t absdiff1 =
-                    abs_difference(
-                            mSampleTimeEntries[closestIndex].mCompositionTime,
-                            req_time);
-
-                uint32_t absdiff2 =
-                    abs_difference(
-                            mSampleTimeEntries[closestIndex - 1].mCompositionTime,
-                            req_time);
-
-                if (absdiff1 > absdiff2) {
-                    closestIndex = closestIndex - 1;
-                }
+            // pick closest based on timestamp. use abs_difference for safety
+            if (abs_difference(
+                    getSampleTime(closestIndex, scale_num, scale_den), req_time) >
+                abs_difference(
+                    req_time, getSampleTime(closestIndex - 1, scale_num, scale_den))) {
+                --closestIndex;
             }
-
             break;
         }
     }
 
     *sample_index = mSampleTimeEntries[closestIndex].mSampleIndex;
-
     return OK;
 }
 
@@ -618,109 +607,85 @@
     }
 
     uint32_t left = 0;
-    uint32_t right = mNumSyncSamples;
-    while (left < right) {
-        uint32_t center = left + (right - left) / 2;
+    uint32_t right_plus_one = mNumSyncSamples;
+    while (left < right_plus_one) {
+        uint32_t center = left + (right_plus_one - left) / 2;
         uint32_t x = mSyncSamples[center];
 
         if (start_sample_index < x) {
-            right = center;
+            right_plus_one = center;
         } else if (start_sample_index > x) {
             left = center + 1;
         } else {
-            left = center;
-            break;
+            *sample_index = x;
+            return OK;
         }
     }
+
     if (left == mNumSyncSamples) {
         if (flags == kFlagAfter) {
             ALOGE("tried to find a sync frame after the last one: %d", left);
             return ERROR_OUT_OF_RANGE;
         }
-        left = left - 1;
+        flags = kFlagBefore;
+    } else if (left == 0) {
+        if (flags == kFlagBefore) {
+            ALOGE("tried to find a sync frame before the first one: %d", left);
+
+            // normally we should return out of range, but that is
+            // treated as end-of-stream.  instead seek to first sync
+            //
+            // return ERROR_OUT_OF_RANGE;
+        }
+        flags = kFlagAfter;
     }
 
-    // Now ssi[left] is the sync sample index just before (or at)
-    // start_sample_index.
-    // Also start_sample_index < ssi[left + 1], if left + 1 < mNumSyncSamples.
-
-    uint32_t x = mSyncSamples[left];
-
-    if (left + 1 < mNumSyncSamples) {
-        uint32_t y = mSyncSamples[left + 1];
-
-        // our sample lies between sync samples x and y.
-
-        status_t err = mSampleIterator->seekTo(start_sample_index);
-        if (err != OK) {
-            return err;
-        }
-
-        uint32_t sample_time = mSampleIterator->getSampleTime();
-
-        err = mSampleIterator->seekTo(x);
-        if (err != OK) {
-            return err;
-        }
-        uint32_t x_time = mSampleIterator->getSampleTime();
-
-        err = mSampleIterator->seekTo(y);
-        if (err != OK) {
-            return err;
-        }
-
-        uint32_t y_time = mSampleIterator->getSampleTime();
-
-        if (abs_difference(x_time, sample_time)
-                > abs_difference(y_time, sample_time)) {
-            // Pick the sync sample closest (timewise) to the start-sample.
-            x = y;
-            ++left;
-        }
-    }
-
+    // Now ssi[left - 1] <(=) start_sample_index <= ssi[left]
     switch (flags) {
         case kFlagBefore:
         {
-            if (x > start_sample_index) {
-                CHECK(left > 0);
-
-                x = mSyncSamples[left - 1];
-
-                if (x > start_sample_index) {
-                    // The table of sync sample indices was not sorted
-                    // properly.
-                    return ERROR_MALFORMED;
-                }
-            }
+            --left;
             break;
         }
-
         case kFlagAfter:
         {
-            if (x < start_sample_index) {
-                if (left + 1 >= mNumSyncSamples) {
-                    return ERROR_OUT_OF_RANGE;
-                }
-
-                x = mSyncSamples[left + 1];
-
-                if (x < start_sample_index) {
-                    // The table of sync sample indices was not sorted
-                    // properly.
-                    return ERROR_MALFORMED;
-                }
-            }
-
+            // nothing to do
             break;
         }
-
         default:
+        {
+            // this route is not used, but implement it nonetheless
+            CHECK(flags == kFlagClosest);
+
+            status_t err = mSampleIterator->seekTo(start_sample_index);
+            if (err != OK) {
+                return err;
+            }
+            uint32_t sample_time = mSampleIterator->getSampleTime();
+
+            err = mSampleIterator->seekTo(mSyncSamples[left]);
+            if (err != OK) {
+                return err;
+            }
+            uint32_t upper_time = mSampleIterator->getSampleTime();
+
+            err = mSampleIterator->seekTo(mSyncSamples[left - 1]);
+            if (err != OK) {
+                return err;
+            }
+            uint32_t lower_time = mSampleIterator->getSampleTime();
+
+            // use abs_difference for safety
+            if (abs_difference(upper_time, sample_time) >
+                abs_difference(sample_time, lower_time)) {
+                --left;
+            }
             break;
+        }
     }
 
-    *sample_index = x;
-
+    *sample_index = mSyncSamples[left];
     return OK;
 }
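The rewritten SampleTable searches share one pattern: the binary search leaves "left" at the first entry at or past the request, the out-of-range edges are folded into kFlagBefore/kFlagAfter, and only the interior kFlagClosest case compares both neighbours. A standalone sketch of that post-search selection (the ERROR_OUT_OF_RANGE handling for kFlagAfter past the end is omitted, and the names are illustrative):

    #include <cstdint>
    #include <cstdio>

    enum Flags { kFlagBefore, kFlagAfter, kFlagClosest };

    // "left" is the first index with times[left] >= reqTime (or count if none).
    static uint32_t pickSample(const uint64_t *times, uint32_t count,
                               uint64_t reqTime, uint32_t left, Flags flags) {
        if (left == count) {
            flags = kFlagBefore;            // past the end: can only go backwards
        } else if (left == 0) {
            flags = kFlagAfter;             // before the first sample: go forwards
        }
        switch (flags) {
            case kFlagBefore:
                return left - 1;
            case kFlagAfter:
                return left;
            default: {                      // kFlagClosest: compare both neighbours
                uint64_t up = times[left] - reqTime;
                uint64_t down = reqTime - times[left - 1];
                return up > down ? left - 1 : left;
            }
        }
    }

    int main() {
        const uint64_t times[] = {0, 40, 80, 120};
        // request 90 lands between 80 and 120; 80 is closer, so index 2 is picked
        printf("%u\n", (unsigned)pickSample(times, 4, 90, 3, kFlagClosest));
    }
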
 
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index dc42f91..d268aa4 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -127,6 +127,20 @@
     return NULL;
 }
 
+bool AMessage::contains(const char *name) const {
+    name = AAtomizer::Atomize(name);
+
+    for (size_t i = 0; i < mNumItems; ++i) {
+        const Item *item = &mItems[i];
+
+        if (item->mName == name) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
 #define BASIC_TYPE(NAME,FIELDNAME,TYPENAME)                             \
 void AMessage::set##NAME(const char *name, TYPENAME value) {            \
     Item *item = allocateItem(name);                                    \
@@ -160,6 +174,11 @@
     item->u.stringValue = new AString(s, len < 0 ? strlen(s) : len);
 }
 
+void AMessage::setString(
+        const char *name, const AString &s) {
+    setString(name, s.c_str(), s.size());
+}
+
 void AMessage::setObjectInternal(
         const char *name, const sp<RefBase> &obj, Type type) {
     Item *item = allocateItem(name);
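Small usage sketch of the two AMessage additions used by the capability parsing above: contains() checks for a key of any type without asserting, and setString() now accepts an AString directly. Meant to compile against the foundation headers in the tree; the key names are illustrative:

    #include <media/stagefright/foundation/AMessage.h>
    #include <media/stagefright/foundation/AString.h>

    using namespace android;

    static void capsExample() {
        sp<AMessage> caps = new AMessage();
        AString range("1-2");
        caps->setString("channel-count-range", range);     // new AString overload
        if (!caps->contains("bitrate-range")) {             // no CHECK/abort on miss
            caps->setString("bitrate-range", "1-320000");
        }
    }
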
diff --git a/media/libstagefright/include/SampleTable.h b/media/libstagefright/include/SampleTable.h
index fe146f2..d06df7b 100644
--- a/media/libstagefright/include/SampleTable.h
+++ b/media/libstagefright/include/SampleTable.h
@@ -75,7 +75,8 @@
         kFlagClosest
     };
     status_t findSampleAtTime(
-            uint32_t req_time, uint32_t *sample_index, uint32_t flags);
+            uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
+            uint32_t *sample_index, uint32_t flags);
 
     status_t findSyncSampleNear(
             uint32_t start_sample_index, uint32_t *sample_index,
@@ -138,6 +139,13 @@
 
     friend struct SampleIterator;
 
+    // composition time rescaled to the caller's units by scale_num/scale_den;
+    // integer division truncates (we don't round)
+    inline uint64_t getSampleTime(
+            size_t sample_index, uint64_t scale_num, uint64_t scale_den) const {
+        return (mSampleTimeEntries[sample_index].mCompositionTime
+            * scale_num) / scale_den;
+    }
+
     status_t getSampleSize_l(uint32_t sample_index, size_t *sample_size);
     uint32_t getCompositionTimeOffset(uint32_t sampleIndex);
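getSampleTime() rescales the stored composition time into the caller's units with truncating integer division; note that the 64-bit product can still overflow for very large times or scale factors, which callers have to keep in mind. A minimal worked example of the arithmetic (the values are illustrative):

    #include <cstdint>
    #include <cstdio>

    // Truncating rescale, as in the new getSampleTime(): composition time stored
    // in media-timescale ticks, converted to the caller's units (here microseconds).
    static uint64_t rescale(uint64_t compositionTime, uint64_t scale_num, uint64_t scale_den) {
        return (compositionTime * scale_num) / scale_den;   // no rounding
    }

    int main() {
        // e.g. 45045 ticks at a 90000 Hz timescale -> 500500 us
        printf("%llu\n", (unsigned long long)rescale(45045, 1000000, 90000));
    }
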
 
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 871824a..a0319ab 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -230,6 +230,11 @@
             int32_t oldDiscontinuityType;
             if (!oldBuffer->meta()->findInt32(
                         "discontinuity", &oldDiscontinuityType)) {
+                MediaBuffer *mbuf = NULL;
+                oldBuffer->meta()->findPointer("mediaBuffer", (void**)&mbuf);
+                if (mbuf != NULL) {
+                    mbuf->release();
+                }
                 it = mBuffers.erase(it);
                 continue;
             }
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 22b12d9..cc4770a 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -233,7 +233,7 @@
             instance, &handle);
 
     if (err != OMX_ErrorNone) {
-        ALOGV("FAILED to allocate omx component '%s'", name);
+        ALOGE("FAILED to allocate omx component '%s'", name);
 
         instance->onGetHandleFailed();
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 5fd7ce8..1ad6285 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1820,7 +1820,8 @@
                                           audio_devices_t *pDevices,
                                           uint32_t *pSamplingRate,
                                           audio_format_t *pFormat,
-                                          audio_channel_mask_t *pChannelMask)
+                                          audio_channel_mask_t *pChannelMask,
+                                          audio_input_flags_t flags)
 {
     struct audio_config config;
     memset(&config, 0, sizeof(config));
@@ -1847,15 +1848,15 @@
     audio_io_handle_t id = nextUniqueId();
 
     audio_stream_in_t *inStream = NULL;
-    audio_input_flags_t flags = AUDIO_INPUT_FLAG_FAST;  // FIXME until added to openInput()
     status_t status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
                                         &inStream, flags);
     ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %#x, Channels %x, "
-            "status %d",
+            "flags %#x, status %d",
             inStream,
             config.sample_rate,
             config.format,
             config.channel_mask,
+            flags,
             status);
 
     // If the input could not be opened with the requested parameters and we can handle the
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 4df0921..bae18fd 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -179,7 +179,8 @@
                                         audio_devices_t *pDevices,
                                         uint32_t *pSamplingRate,
                                         audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask);
+                                        audio_channel_mask_t *pChannelMask,
+                                        audio_input_flags_t flags);
 
     virtual status_t closeInput(audio_io_handle_t input);
 
@@ -327,7 +328,7 @@
     void                    purgeStaleEffects_l();
 
     // Set kEnableExtendedPrecision to true to use extended precision in MixerThread
-    static const bool kEnableExtendedPrecision = false;
+    static const bool kEnableExtendedPrecision = true;
 
     // Returns true if format is permitted for the PCM sink in the MixerThread
     static inline bool isValidPcmSinkFormat(audio_format_t format) {
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index e57cb8a..529f2af 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -62,6 +62,10 @@
 #define ALOGVV(a...) do { } while (0)
 #endif
 
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
 // Set kUseNewMixer to true to use the new mixer engine. Otherwise the
 // original code will be used.  This is false for now.
 static const bool kUseNewMixer = false;
@@ -71,52 +75,12 @@
 // because of downmix/upmix support.
 static const bool kUseFloat = true;
 
+// Default copy buffer size, in frames, used for input processing.
+static const size_t kCopyBufferFrameCount = 256;
+
 namespace android {
 
 // ----------------------------------------------------------------------------
-AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider() : AudioBufferProvider(),
-        mTrackBufferProvider(NULL), mDownmixHandle(NULL)
-{
-}
-
-AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider()
-{
-    ALOGV("AudioMixer deleting DownmixerBufferProvider (%p)", this);
-    EffectRelease(mDownmixHandle);
-}
-
-status_t AudioMixer::DownmixerBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
-        int64_t pts) {
-    //ALOGV("DownmixerBufferProvider::getNextBuffer()");
-    if (mTrackBufferProvider != NULL) {
-        status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
-        if (res == OK) {
-            mDownmixConfig.inputCfg.buffer.frameCount = pBuffer->frameCount;
-            mDownmixConfig.inputCfg.buffer.raw = pBuffer->raw;
-            mDownmixConfig.outputCfg.buffer.frameCount = pBuffer->frameCount;
-            mDownmixConfig.outputCfg.buffer.raw = mDownmixConfig.inputCfg.buffer.raw;
-            // in-place so overwrite the buffer contents, has been set in prepareTrackForDownmix()
-            //mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
-
-            res = (*mDownmixHandle)->process(mDownmixHandle,
-                    &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
-            //ALOGV("getNextBuffer is downmixing");
-        }
-        return res;
-    } else {
-        ALOGE("DownmixerBufferProvider::getNextBuffer() error: NULL track buffer provider");
-        return NO_INIT;
-    }
-}
-
-void AudioMixer::DownmixerBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
-    //ALOGV("DownmixerBufferProvider::releaseBuffer()");
-    if (mTrackBufferProvider != NULL) {
-        mTrackBufferProvider->releaseBuffer(pBuffer);
-    } else {
-        ALOGE("DownmixerBufferProvider::releaseBuffer() error: NULL track buffer provider");
-    }
-}
 
 template <typename T>
 T min(const T& a, const T& b)
@@ -124,102 +88,289 @@
     return a < b ? a : b;
 }
 
-AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels,
-        audio_format_t inputFormat, audio_format_t outputFormat) :
-        mTrackBufferProvider(NULL),
-        mChannels(channels),
-        mInputFormat(inputFormat),
-        mOutputFormat(outputFormat),
-        mInputFrameSize(channels * audio_bytes_per_sample(inputFormat)),
-        mOutputFrameSize(channels * audio_bytes_per_sample(outputFormat)),
-        mOutputData(NULL),
-        mOutputCount(0),
+AudioMixer::CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
+        size_t outputFrameSize, size_t bufferFrameCount) :
+        mInputFrameSize(inputFrameSize),
+        mOutputFrameSize(outputFrameSize),
+        mLocalBufferFrameCount(bufferFrameCount),
+        mLocalBufferData(NULL),
         mConsumed(0)
 {
-    ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat);
-    if (requiresInternalBuffers()) {
-        mOutputCount = 256;
-        (void)posix_memalign(&mOutputData, 32, mOutputCount * mOutputFrameSize);
+    ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
+            inputFrameSize, outputFrameSize, bufferFrameCount);
+    LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
+            "Requires local buffer if inputFrameSize(%d) < outputFrameSize(%d)",
+            inputFrameSize, outputFrameSize);
+    if (mLocalBufferFrameCount) {
+        (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
     }
     mBuffer.frameCount = 0;
 }
 
-AudioMixer::ReformatBufferProvider::~ReformatBufferProvider()
+AudioMixer::CopyBufferProvider::~CopyBufferProvider()
 {
-    ALOGV("~ReformatBufferProvider(%p)", this);
+    ALOGV("~CopyBufferProvider(%p)", this);
     if (mBuffer.frameCount != 0) {
         mTrackBufferProvider->releaseBuffer(&mBuffer);
     }
-    free(mOutputData);
+    free(mLocalBufferData);
 }
 
-status_t AudioMixer::ReformatBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
-        int64_t pts) {
-    //ALOGV("ReformatBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
+status_t AudioMixer::CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
+        int64_t pts)
+{
+    //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
     //        this, pBuffer, pBuffer->frameCount, pts);
-    if (!requiresInternalBuffers()) {
+    if (mLocalBufferFrameCount == 0) {
         status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
         if (res == OK) {
-            memcpy_by_audio_format(pBuffer->raw, mOutputFormat, pBuffer->raw, mInputFormat,
-                    pBuffer->frameCount * mChannels);
+            copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
         }
         return res;
     }
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = pBuffer->frameCount;
         status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
-        // TODO: Track down a bug in the upstream provider
-        // LOG_ALWAYS_FATAL_IF(res == OK && mBuffer.frameCount == 0,
-        //        "ReformatBufferProvider::getNextBuffer():"
-        //        " Invalid zero framecount returned from getNextBuffer()");
-        if (res != OK || mBuffer.frameCount == 0) {
+        // At one time an upstream buffer provider returned res == OK with
+        // mBuffer.frameCount == 0; that no longer seems to happen (as of 7/18/2014).
+        //
+        // By API spec, if res != OK then mBuffer.frameCount == 0,
+        // but there may be improper implementations.
+        ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
+        if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
             pBuffer->raw = NULL;
             pBuffer->frameCount = 0;
             return res;
         }
+        mConsumed = 0;
     }
     ALOG_ASSERT(mConsumed < mBuffer.frameCount);
-    size_t count = min(mOutputCount, mBuffer.frameCount - mConsumed);
+    size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
     count = min(count, pBuffer->frameCount);
-    pBuffer->raw = mOutputData;
+    pBuffer->raw = mLocalBufferData;
     pBuffer->frameCount = count;
-    //ALOGV("reformatting %d frames from %#x to %#x, %d chan",
-    //        pBuffer->frameCount, mInputFormat, mOutputFormat, mChannels);
-    memcpy_by_audio_format(pBuffer->raw, mOutputFormat,
-            (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize, mInputFormat,
-            pBuffer->frameCount * mChannels);
+    copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
+            pBuffer->frameCount);
     return OK;
 }
 
-void AudioMixer::ReformatBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
-    //ALOGV("ReformatBufferProvider(%p)::releaseBuffer(%p(%zu))",
+void AudioMixer::CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+    //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))",
     //        this, pBuffer, pBuffer->frameCount);
-    if (!requiresInternalBuffers()) {
+    if (mLocalBufferFrameCount == 0) {
         mTrackBufferProvider->releaseBuffer(pBuffer);
         return;
     }
     // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
     mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content
     if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) {
-        mConsumed = 0;
         mTrackBufferProvider->releaseBuffer(&mBuffer);
-        // ALOG_ASSERT(mBuffer.frameCount == 0);
+        ALOG_ASSERT(mBuffer.frameCount == 0);
     }
     pBuffer->raw = NULL;
     pBuffer->frameCount = 0;
 }
 
-void AudioMixer::ReformatBufferProvider::reset() {
+void AudioMixer::CopyBufferProvider::reset()
+{
     if (mBuffer.frameCount != 0) {
         mTrackBufferProvider->releaseBuffer(&mBuffer);
     }
     mConsumed = 0;
 }
 
-// ----------------------------------------------------------------------------
-bool AudioMixer::sIsMultichannelCapable = false;
+AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider(
+        audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
+        CopyBufferProvider(
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
+            bufferFrameCount)  // set bufferFrameCount to 0 to do in-place
+{
+    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
+            this, inputChannelMask, outputChannelMask, format,
+            sampleRate, sessionId);
+    if (!sIsMultichannelCapable
+            || EffectCreate(&sDwnmFxDesc.uuid,
+                    sessionId,
+                    SESSION_ID_INVALID_AND_IGNORED,
+                    &mDownmixHandle) != 0) {
+         ALOGE("DownmixerBufferProvider() error creating downmixer effect");
+         mDownmixHandle = NULL;
+         return;
+     }
+     // channel input configuration will be overridden per-track
+     mDownmixConfig.inputCfg.channels = inputChannelMask;   // FIXME: Should be bits
+     mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
+     mDownmixConfig.inputCfg.format = format;
+     mDownmixConfig.outputCfg.format = format;
+     mDownmixConfig.inputCfg.samplingRate = sampleRate;
+     mDownmixConfig.outputCfg.samplingRate = sampleRate;
+     mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+     mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
+     // input and output buffer provider, and frame count will not be used as the downmix effect
+     // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
+     mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
+             EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
+     mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
 
-effect_descriptor_t AudioMixer::sDwnmFxDesc;
+     int cmdStatus;
+     uint32_t replySize = sizeof(int);
+
+     // Configure downmixer
+     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
+             &mDownmixConfig /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Enable downmixer
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Set downmix type
+     // parameter size rounded for padding on 32bit boundary
+     const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
+     const int downmixParamSize =
+             sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
+     effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
+     param->psize = sizeof(downmix_params_t);
+     const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
+     memcpy(param->data, &downmixParam, param->psize);
+     const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
+     param->vsize = sizeof(downmix_type_t);
+     memcpy(param->data + psizePadded, &downmixType, param->vsize);
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
+             param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
+     free(param);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+     ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
+}
+
+AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider()
+{
+    ALOGV("~DownmixerBufferProvider (%p)", this);
+    EffectRelease(mDownmixHandle);
+    mDownmixHandle = NULL;
+}
+
+void AudioMixer::DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    mDownmixConfig.inputCfg.buffer.frameCount = frames;
+    mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src);
+    mDownmixConfig.outputCfg.buffer.frameCount = frames;
+    mDownmixConfig.outputCfg.buffer.raw = dst;
+    // may be in-place if src == dst.
+    status_t res = (*mDownmixHandle)->process(mDownmixHandle,
+            &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
+    ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
+}
+
+/* call once in a pthread_once handler. */
+/*static*/ status_t AudioMixer::DownmixerBufferProvider::init()
+{
+    // find multichannel downmix effect if we have to play multichannel content
+    uint32_t numEffects = 0;
+    int ret = EffectQueryNumberEffects(&numEffects);
+    if (ret != 0) {
+        ALOGE("AudioMixer() error %d querying number of effects", ret);
+        return NO_INIT;
+    }
+    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
+
+    for (uint32_t i = 0 ; i < numEffects ; i++) {
+        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
+            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
+            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+                ALOGI("found effect \"%s\" from %s",
+                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
+                sIsMultichannelCapable = true;
+                break;
+            }
+        }
+    }
+    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
+    return sIsMultichannelCapable ? OK : NO_INIT;
+}
+
+/*static*/ bool AudioMixer::DownmixerBufferProvider::sIsMultichannelCapable = false;
+/*static*/ effect_descriptor_t AudioMixer::DownmixerBufferProvider::sDwnmFxDesc;
+
+AudioMixer::RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(inputChannelMask),
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(outputChannelMask),
+                bufferFrameCount),
+        mFormat(format),
+        mSampleSize(audio_bytes_per_sample(format)),
+        mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
+        mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
+{
+    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %d %d",
+            this, format, inputChannelMask, outputChannelMask,
+            mInputChannels, mOutputChannels);
+    // TODO: consider channel representation in index array formulation
+    // We ignore channel representation, and just use the bits.
+    memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry),
+            audio_channel_mask_get_bits(outputChannelMask),
+            audio_channel_mask_get_bits(inputChannelMask));
+}
+
+void AudioMixer::RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_index_array(dst, mOutputChannels,
+            src, mInputChannels, mIdxAry, mSampleSize, frames);
+}
+
+AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels,
+        audio_format_t inputFormat, audio_format_t outputFormat,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+            channels * audio_bytes_per_sample(inputFormat),
+            channels * audio_bytes_per_sample(outputFormat),
+            bufferFrameCount),
+        mChannels(channels),
+        mInputFormat(inputFormat),
+        mOutputFormat(outputFormat)
+{
+    ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat);
+}
+
+void AudioMixer::ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannels);
+}
+
+// ----------------------------------------------------------------------------
 
 // Ensure mConfiguredNames bitmask is initialized properly on all architectures.
 // The value of 1 << x is undefined in C when x >= 32.
@@ -258,6 +409,7 @@
     for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
         t->resampler = NULL;
         t->downmixerBufferProvider = NULL;
+        t->mReformatBufferProvider = NULL;
         t++;
     }
 
@@ -269,6 +421,7 @@
     for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
         delete t->resampler;
         delete t->downmixerBufferProvider;
+        delete t->mReformatBufferProvider;
         t++;
     }
     delete [] mState.outputTemp;
@@ -409,95 +562,20 @@
 
     // discard the previous downmixer if there was one
     unprepareTrackForDownmix(pTrack, trackName);
+    if (DownmixerBufferProvider::isMultichannelCapable()) {
+        DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(pTrack->channelMask,
+                /* pTrack->mMixerChannelMask */ audio_channel_out_mask_from_count(2),
+                /* pTrack->mMixerInFormat */ AUDIO_FORMAT_PCM_16_BIT,
+                pTrack->sampleRate, pTrack->sessionId, kCopyBufferFrameCount);
 
-    DownmixerBufferProvider* pDbp = new DownmixerBufferProvider();
-    int32_t status;
-
-    if (!sIsMultichannelCapable) {
-        ALOGE("prepareTrackForDownmix(%d) fails: mixer doesn't support multichannel content",
-                trackName);
-        goto noDownmixForActiveTrack;
+        if (pDbp->isValid()) { // if constructor completed properly
+            pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
+            pTrack->downmixerBufferProvider = pDbp;
+            reconfigureBufferProviders(pTrack);
+            return NO_ERROR;
+        }
+        delete pDbp;
     }
-
-    if (EffectCreate(&sDwnmFxDesc.uuid,
-            pTrack->sessionId /*sessionId*/, -2 /*ioId not relevant here, using random value*/,
-            &pDbp->mDownmixHandle/*pHandle*/) != 0) {
-        ALOGE("prepareTrackForDownmix(%d) fails: error creating downmixer effect", trackName);
-        goto noDownmixForActiveTrack;
-    }
-
-    // channel input configuration will be overridden per-track
-    pDbp->mDownmixConfig.inputCfg.channels = pTrack->channelMask;
-    pDbp->mDownmixConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
-    pDbp->mDownmixConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
-    pDbp->mDownmixConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
-    pDbp->mDownmixConfig.inputCfg.samplingRate = pTrack->sampleRate;
-    pDbp->mDownmixConfig.outputCfg.samplingRate = pTrack->sampleRate;
-    pDbp->mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
-    pDbp->mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
-    // input and output buffer provider, and frame count will not be used as the downmix effect
-    // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
-    pDbp->mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
-            EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
-    pDbp->mDownmixConfig.outputCfg.mask = pDbp->mDownmixConfig.inputCfg.mask;
-
-    {// scope for local variables that are not used in goto label "noDownmixForActiveTrack"
-        int cmdStatus;
-        uint32_t replySize = sizeof(int);
-
-        // Configure and enable downmixer
-        status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle,
-                EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
-                &pDbp->mDownmixConfig /*pCmdData*/,
-                &replySize /*replySize*/, &cmdStatus /*pReplyData*/);
-        if ((status != 0) || (cmdStatus != 0)) {
-            ALOGE("error %d while configuring downmixer for track %d", status, trackName);
-            goto noDownmixForActiveTrack;
-        }
-        replySize = sizeof(int);
-        status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle,
-                EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
-                &replySize /*replySize*/, &cmdStatus /*pReplyData*/);
-        if ((status != 0) || (cmdStatus != 0)) {
-            ALOGE("error %d while enabling downmixer for track %d", status, trackName);
-            goto noDownmixForActiveTrack;
-        }
-
-        // Set downmix type
-        // parameter size rounded for padding on 32bit boundary
-        const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
-        const int downmixParamSize =
-                sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
-        effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
-        param->psize = sizeof(downmix_params_t);
-        const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
-        memcpy(param->data, &downmixParam, param->psize);
-        const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
-        param->vsize = sizeof(downmix_type_t);
-        memcpy(param->data + psizePadded, &downmixType, param->vsize);
-
-        status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle,
-                EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize/* cmdSize */,
-                param /*pCmndData*/, &replySize /*replySize*/, &cmdStatus /*pReplyData*/);
-
-        free(param);
-
-        if ((status != 0) || (cmdStatus != 0)) {
-            ALOGE("error %d while setting downmix type for track %d", status, trackName);
-            goto noDownmixForActiveTrack;
-        } else {
-            ALOGV("downmix type set to %d for track %d", (int) downmixType, trackName);
-        }
-    }// end of scope for local variables that are not used in goto label "noDownmixForActiveTrack"
-
-    // initialization successful:
-    pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // 16 bit input is required for downmix
-    pTrack->downmixerBufferProvider = pDbp;
-    reconfigureBufferProviders(pTrack);
-    return NO_ERROR;
-
-noDownmixForActiveTrack:
-    delete pDbp;
     pTrack->downmixerBufferProvider = NULL;
     reconfigureBufferProviders(pTrack);
     return NO_INIT;
@@ -521,7 +599,8 @@
     if (pTrack->mFormat != pTrack->mMixerInFormat) {
         pTrack->mReformatBufferProvider = new ReformatBufferProvider(
                 audio_channel_count_from_out_mask(pTrack->channelMask),
-                pTrack->mFormat, pTrack->mMixerInFormat);
+                pTrack->mFormat, pTrack->mMixerInFormat,
+                kCopyBufferFrameCount);
         reconfigureBufferProviders(pTrack);
     }
     return NO_ERROR;
@@ -531,11 +610,11 @@
 {
     pTrack->bufferProvider = pTrack->mInputBufferProvider;
     if (pTrack->mReformatBufferProvider) {
-        pTrack->mReformatBufferProvider->mTrackBufferProvider = pTrack->bufferProvider;
+        pTrack->mReformatBufferProvider->setBufferProvider(pTrack->bufferProvider);
         pTrack->bufferProvider = pTrack->mReformatBufferProvider;
     }
     if (pTrack->downmixerBufferProvider) {
-        pTrack->downmixerBufferProvider->mTrackBufferProvider = pTrack->bufferProvider;
+        pTrack->downmixerBufferProvider->setBufferProvider(pTrack->bufferProvider);
         pTrack->bufferProvider = pTrack->downmixerBufferProvider;
     }
 }
@@ -1780,29 +1859,9 @@
 /*static*/ void AudioMixer::sInitRoutine()
 {
     LocalClock lc;
-    sLocalTimeFreq = lc.getLocalFreq();
+    sLocalTimeFreq = lc.getLocalFreq(); // for the resampler
 
-    // find multichannel downmix effect if we have to play multichannel content
-    uint32_t numEffects = 0;
-    int ret = EffectQueryNumberEffects(&numEffects);
-    if (ret != 0) {
-        ALOGE("AudioMixer() error %d querying number of effects", ret);
-        return;
-    }
-    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
-
-    for (uint32_t i = 0 ; i < numEffects ; i++) {
-        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
-            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
-            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
-                ALOGI("found effect \"%s\" from %s",
-                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
-                sIsMultichannelCapable = true;
-                break;
-            }
-        }
-    }
-    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
+    DownmixerBufferProvider::init(); // for the downmixer
 }
 
 template <int MIXTYPE, int NCHAN, bool USEFLOATVOL, bool ADJUSTVOL,
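CopyBufferProvider now owns all of the chunking and local-buffer logic, so each wrapper only supplies copyFrames(). A hypothetical, standalone subclass-shaped sketch of that contract (a -6 dB gain on 16-bit PCM; the real providers chain to an upstream AudioBufferProvider, which is omitted here):

    #include <cstddef>
    #include <cstdint>

    // Illustrative only: shows the copyFrames() contract -- convert/transform
    // exactly 'frames' frames from src to dst, with frame sizes fixed up front.
    class HalveGainProvider {
    public:
        explicit HalveGainProvider(size_t channels) : mChannels(channels) {}

        // equivalent of CopyBufferProvider::copyFrames()
        void copyFrames(void *dst, const void *src, size_t frames) const {
            const int16_t *in = static_cast<const int16_t *>(src);
            int16_t *out = static_cast<int16_t *>(dst);
            for (size_t i = 0; i < frames * mChannels; ++i) {
                out[i] = static_cast<int16_t>(in[i] / 2);   // -6 dB, in-place safe
            }
        }

    private:
        const size_t mChannels;
    };

    int main() {
        int16_t pcm[4] = {1000, -1000, 2000, -2000};
        HalveGainProvider p(2);
        p.copyFrames(pcm, pcm, 2);   // in-place, like the bufferFrameCount == 0 mode
        return pcm[0] == 500 ? 0 : 1;
    }
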
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index a9f4761..09a4d89 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -153,8 +153,7 @@
 
     struct state_t;
     struct track_t;
-    class DownmixerBufferProvider;
-    class ReformatBufferProvider;
+    class CopyBufferProvider;
 
     typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp,
                            int32_t* aux);
@@ -206,9 +205,9 @@
         int32_t*           auxBuffer;
 
         // 16-byte boundary
-        AudioBufferProvider*     mInputBufferProvider;    // 4 bytes
-        ReformatBufferProvider*  mReformatBufferProvider; // 4 bytes
-        DownmixerBufferProvider* downmixerBufferProvider; // 4 bytes
+        AudioBufferProvider*     mInputBufferProvider;    // externally provided buffer provider.
+        CopyBufferProvider*      mReformatBufferProvider; // provider wrapper for reformatting.
+        CopyBufferProvider*      downmixerBufferProvider; // wrapper for channel conversion.
 
         int32_t     sessionId;
 
@@ -253,48 +252,112 @@
         track_t         tracks[MAX_NUM_TRACKS] __attribute__((aligned(32)));
     };
 
-    // AudioBufferProvider that wraps a track AudioBufferProvider by a call to a downmix effect
-    class DownmixerBufferProvider : public AudioBufferProvider {
+    // Base AudioBufferProvider class used for DownmixerBufferProvider, RemixBufferProvider,
+    // and ReformatBufferProvider.
+    // It handles a private buffer for use in converting format or channel masks from the
+    // input data to a form acceptable by the mixer.
+    // TODO: Make a ResamplerBufferProvider when integers are entirely removed from the
+    // processing pipeline.
+    class CopyBufferProvider : public AudioBufferProvider {
     public:
-        virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
-        virtual void releaseBuffer(Buffer* buffer);
-        DownmixerBufferProvider();
-        virtual ~DownmixerBufferProvider();
+        // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes).
+        // If bufferFrameCount is 0, no private buffer is created and in-place modification of
+        // the upstream buffer provider's buffers is performed by copyFrames().
+        CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize,
+                size_t bufferFrameCount);
+        virtual ~CopyBufferProvider();
 
-        AudioBufferProvider* mTrackBufferProvider;
-        effect_handle_t    mDownmixHandle;
-        effect_config_t    mDownmixConfig;
-    };
-
-    // AudioBufferProvider wrapper that reformats track to acceptable mixer input type
-    class ReformatBufferProvider : public AudioBufferProvider {
-    public:
-        ReformatBufferProvider(int32_t channels,
-                audio_format_t inputFormat, audio_format_t outputFormat);
-        virtual ~ReformatBufferProvider();
-
-        // overrides AudioBufferProvider methods
+        // Overrides AudioBufferProvider methods
         virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
         virtual void releaseBuffer(Buffer* buffer);
 
-        void reset();
-        inline bool requiresInternalBuffers() {
-            return true; //mInputFrameSize < mOutputFrameSize;
+        // Other public methods
+
+        // Call this to release the current buffer to the upstream provider.
+        // The release is treated as an audio discontinuity for future samples.
+        virtual void reset();
+
+        // This function must be supplied by the derived class.  It converts
+        // 'frames' frames from the *src pointer to the *dst pointer.  It is public
+        // because some providers allow it to operate on arbitrary buffers outside
+        // of the internal buffers.
+        virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
+
+        // set the upstream buffer provider. Consider calling "reset" before this function.
+        void setBufferProvider(AudioBufferProvider *p) {
+            mTrackBufferProvider = p;
         }
 
+    protected:
         AudioBufferProvider* mTrackBufferProvider;
-        int32_t              mChannels;
-        audio_format_t       mInputFormat;
-        audio_format_t       mOutputFormat;
-        size_t               mInputFrameSize;
-        size_t               mOutputFrameSize;
-        // (only) required for reformatting to a larger size.
+        const size_t         mInputFrameSize;
+        const size_t         mOutputFrameSize;
+    private:
         AudioBufferProvider::Buffer mBuffer;
-        void*                mOutputData;
-        size_t               mOutputCount;
+        const size_t         mLocalBufferFrameCount;
+        void*                mLocalBufferData;
         size_t               mConsumed;
     };
 
+    // DownmixerBufferProvider wraps a track AudioBufferProvider to provide
+    // position dependent downmixing by an Audio Effect.
+    class DownmixerBufferProvider : public CopyBufferProvider {
+    public:
+        DownmixerBufferProvider(audio_channel_mask_t inputChannelMask,
+                audio_channel_mask_t outputChannelMask, audio_format_t format,
+                uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount);
+        virtual ~DownmixerBufferProvider();
+        virtual void copyFrames(void *dst, const void *src, size_t frames);
+        bool isValid() const { return mDownmixHandle != NULL; }
+
+        static status_t init();
+        static bool isMultichannelCapable() { return sIsMultichannelCapable; }
+
+    protected:
+        effect_handle_t    mDownmixHandle;
+        effect_config_t    mDownmixConfig;
+
+        // effect descriptor for the downmixer used by the mixer
+        static effect_descriptor_t sDwnmFxDesc;
+        // indicates whether a downmix effect has been found and is usable by this mixer
+        static bool                sIsMultichannelCapable;
+        // FIXME: should we allow effects outside of the framework?
+        // We need to do so here: a special ioId that must be <= -2 is used so it does not map to a session.
+        static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
+    };
+
+    // RemixBufferProvider wraps a track AudioBufferProvider to perform an
+    // upmix or downmix to the proper channel count and mask.
+    class RemixBufferProvider : public CopyBufferProvider {
+    public:
+        RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+                audio_channel_mask_t outputChannelMask, audio_format_t format,
+                size_t bufferFrameCount);
+        virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+    protected:
+        const audio_format_t mFormat;
+        const size_t         mSampleSize;
+        const size_t         mInputChannels;
+        const size_t         mOutputChannels;
+        int8_t               mIdxAry[sizeof(uint32_t)*8]; // 32 bits => channel indices
+    };
+
+    // ReformatBufferProvider wraps a track AudioBufferProvider to convert the input data
+    // to an acceptable mixer input format type.
+    class ReformatBufferProvider : public CopyBufferProvider {
+    public:
+        ReformatBufferProvider(int32_t channels,
+                audio_format_t inputFormat, audio_format_t outputFormat,
+                size_t bufferFrameCount);
+        virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+    protected:
+        const int32_t        mChannels;
+        const audio_format_t mInputFormat;
+        const audio_format_t mOutputFormat;
+    };
+
     // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc.
     uint32_t        mTrackNames;
 
@@ -310,11 +373,6 @@
 private:
     state_t         mState __attribute__((aligned(32)));
 
-    // effect descriptor for the downmixer used by the mixer
-    static effect_descriptor_t sDwnmFxDesc;
-    // indicates whether a downmix effect has been found and is usable by this mixer
-    static bool                sIsMultichannelCapable;
-
     // Call after changing either the enabled status of a track, or parameters of an enabled track.
     // OK to call more often than that, but unnecessary.
     void invalidateState(uint32_t mask);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
old mode 100755
new mode 100644
index e17aa98..e0b664b
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3643,7 +3643,7 @@
             memset(mEffectBuffer, 0, mEffectBufferSize);
         }
         // FIXME as a performance optimization, should remember previous zero status
-        memset(mSinkBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
+        memset(mSinkBuffer, 0, mNormalFrameCount * mFrameSize);
     }
 
     // if any fast tracks, then status is ready
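
The memset fix above matters once the sink buffer is no longer 16-bit PCM: the number of bytes to clear is frames times frame size, where frame size is channel count times bytes per sample. The tiny standalone example below shows how the old expression undercounts for a float sink; the numbers are illustrative, not taken from any specific device configuration.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const size_t frameCount   = 1024;            // mNormalFrameCount (example value)
        const size_t channelCount = 2;               // stereo sink
        const size_t sampleBytes  = sizeof(float);   // float PCM sink buffer (4 bytes/sample)

        const size_t oldBytes = frameCount * channelCount * sizeof(int16_t); // 4096: assumes 16-bit
        const size_t newBytes = frameCount * channelCount * sampleBytes;     // 8192: frameCount * mFrameSize

        std::printf("old clear: %zu bytes, correct clear: %zu bytes\n", oldBytes, newBytes);
        return 0;
    }
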
@@ -5458,21 +5458,14 @@
     // client expresses a preference for FAST, but we get the final say
     if (*flags & IAudioFlinger::TRACK_FAST) {
       if (
-            // use case: callback handler and frame count is default or at least as large as HAL
-            (
-                (tid != -1) &&
-                ((frameCount == 0) /*||
-                // FIXME must be equal to pipe depth, so don't allow it to be specified by client
-                // FIXME not necessarily true, should be native frame count for native SR!
-                (frameCount >= mFrameCount)*/)
-            ) &&
+            // use case: callback handler
+            (tid != -1) &&
+            // frame count is not specified, or is exactly the pipe depth
+            ((frameCount == 0) || (frameCount == mPipeFramesP2)) &&
             // PCM data
             audio_is_linear_pcm(format) &&
             // native format
             (format == mFormat) &&
-            // mono or stereo
-            ( (channelMask == AUDIO_CHANNEL_IN_MONO) ||
-              (channelMask == AUDIO_CHANNEL_IN_STEREO) ) &&
             // native channel mask
             (channelMask == mChannelMask) &&
             // native hardware sample rate
@@ -5482,40 +5475,43 @@
             // there are sufficient fast track slots available
             mFastTrackAvail
         ) {
-        // if frameCount not specified, then it defaults to pipe frame count
-        if (frameCount == 0) {
-            frameCount = mPipeFramesP2;
-        }
-        ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
+        ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%u mFrameCount=%u",
                 frameCount, mFrameCount);
       } else {
-        ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%d "
-                "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
+        ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%u mFrameCount=%u mPipeFramesP2=%u "
+                "format=%#x isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
                 "hasFastCapture=%d tid=%d mFastTrackAvail=%d",
-                frameCount, mFrameCount, format,
-                audio_is_linear_pcm(format),
-                channelMask, sampleRate, mSampleRate, hasFastCapture(), tid, mFastTrackAvail);
+                frameCount, mFrameCount, mPipeFramesP2,
+                format, audio_is_linear_pcm(format), channelMask, sampleRate, mSampleRate,
+                hasFastCapture(), tid, mFastTrackAvail);
         *flags &= ~IAudioFlinger::TRACK_FAST;
-        // FIXME It's not clear that we need to enforce this any more, since we have a pipe.
-        // For compatibility with AudioRecord calculation, buffer depth is forced
-        // to be at least 2 x the record thread frame count and cover audio hardware latency.
-        // This is probably too conservative, but legacy application code may depend on it.
-        // If you change this calculation, also review the start threshold which is related.
-        // FIXME It's not clear how input latency actually matters.  Perhaps this should be 0.
-        uint32_t latencyMs = 50; // FIXME mInput->stream->get_latency(mInput->stream);
-        size_t mNormalFrameCount = 2048; // FIXME
-        uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
-        if (minBufCount < 2) {
-            minBufCount = 2;
-        }
-        size_t minFrameCount = mNormalFrameCount * minBufCount;
+      }
+    }
+
+    // compute track buffer size in frames, and suggest the notification frame count
+    if (*flags & IAudioFlinger::TRACK_FAST) {
+        // fast track: frame count is exactly the pipe depth
+        frameCount = mPipeFramesP2;
+        // ignore requested notificationFrames, and always notify exactly once every HAL buffer
+        *notificationFrames = mFrameCount;
+    } else {
+        // not fast track: frame count is at least 2 HAL buffers and at least 20 ms
+        size_t minFrameCount = ((int64_t) mFrameCount * 2 * sampleRate + mSampleRate - 1) /
+                mSampleRate;
         if (frameCount < minFrameCount) {
             frameCount = minFrameCount;
         }
-      }
+        minFrameCount = (sampleRate * 20 / 1000 + 1) & ~1;
+        if (frameCount < minFrameCount) {
+            frameCount = minFrameCount;
+        }
+        // notification is forced to be at least double-buffering
+        size_t maxNotification = frameCount / 2;
+        if (*notificationFrames == 0 || *notificationFrames > maxNotification) {
+            *notificationFrames = maxNotification;
+        }
     }
     *pFrameCount = frameCount;
-    *notificationFrames = 0;    // FIXME implement
 
     lStatus = initCheck();
     if (lStatus != NO_ERROR) {
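
For non-fast capture the code above sizes the track buffer to at least two HAL buffers (rescaled, rounding up, to the client sample rate) and at least 20 ms, then caps the notification period at half the buffer so the client is always at least double-buffered. The standalone sketch below reproduces that arithmetic; halFrameCount/halSampleRate correspond to mFrameCount/mSampleRate in the record thread, and the example values are assumptions chosen only to show the computation.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    static size_t computeCaptureFrameCount(size_t frameCount, uint32_t sampleRate,
            size_t halFrameCount, uint32_t halSampleRate, size_t* notificationFrames) {
        // at least 2 HAL buffers, rescaled (rounded up) to the client sample rate
        size_t minFrameCount = ((int64_t) halFrameCount * 2 * sampleRate + halSampleRate - 1)
                / halSampleRate;
        if (frameCount < minFrameCount) frameCount = minFrameCount;
        // and at least 20 ms, rounded up to an even number of frames
        minFrameCount = (sampleRate * 20 / 1000 + 1) & ~1;
        if (frameCount < minFrameCount) frameCount = minFrameCount;
        // notification period is forced to be at most half the buffer (double-buffering)
        size_t maxNotification = frameCount / 2;
        if (*notificationFrames == 0 || *notificationFrames > maxNotification) {
            *notificationFrames = maxNotification;
        }
        return frameCount;
    }

    int main() {
        size_t notification = 0;                        // client did not specify
        size_t frames = computeCaptureFrameCount(
                0 /*requested*/, 44100 /*client rate*/,
                480 /*HAL frames*/, 48000 /*HAL rate*/, &notification);
        // 2 * 480 frames at 48 kHz -> 882 frames at 44.1 kHz; the 20 ms minimum is also 882
        std::printf("frameCount=%zu notificationFrames=%zu\n", frames, notification); // 882, 441
        return 0;
    }
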
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index eb3e6b4..af761e4 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1630,12 +1630,11 @@
                 frameCount, mChannelMask);
         // since client and server are in the same process,
         // the buffer has the same virtual address on both sides
-        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize);
+        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
+                true /*clientInServer*/);
         mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
         mClientProxy->setSendLevel(0.0);
         mClientProxy->setSampleRate(sampleRate);
-        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
-                true /*clientInServer*/);
     } else {
         ALOGW("Error creating output track on thread %p", playbackThread);
     }
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/AudioPolicyClientImpl.cpp b/services/audiopolicy/AudioPolicyClientImpl.cpp
index c322d92..b5af089 100644
--- a/services/audiopolicy/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/AudioPolicyClientImpl.cpp
@@ -101,7 +101,8 @@
                               audio_devices_t *pDevices,
                               uint32_t *pSamplingRate,
                               audio_format_t *pFormat,
-                              audio_channel_mask_t *pChannelMask)
+                              audio_channel_mask_t *pChannelMask,
+                              audio_input_flags_t flags)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -109,7 +110,7 @@
         return 0;
     }
 
-    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
+    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask, flags);
 }
 
 status_t AudioPolicyService::AudioPolicyClient::closeInput(audio_io_handle_t input)
diff --git a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
index 53f3e2d..97e12cc 100644
--- a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
@@ -158,7 +158,8 @@
         return 0;
     }
 
-    return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
+    return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
+            AUDIO_INPUT_FLAG_FAST /*FIXME*/);
 }
 
 audio_io_handle_t aps_open_input_on_module(void *service __unused,
@@ -174,7 +175,8 @@
         return 0;
     }
 
-    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
+    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
+            AUDIO_INPUT_FLAG_FAST /*FIXME*/);
 }
 
 int aps_close_input(void *service __unused, audio_io_handle_t input)
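
The legacy bridge above hard-codes AUDIO_INPUT_FLAG_FAST with a FIXME until callers can express a real preference. A hedged sketch of how a caller of the extended openInput() might pass explicit flags instead is shown below; the helper name and the policy of defaulting to AUDIO_INPUT_FLAG_NONE (from system/audio.h) are assumptions, while the six-argument openInput() signature matches the change above.

    // Hypothetical helper: choose input flags explicitly rather than hard-coding FAST.
    static audio_io_handle_t openInputWithFlags(const sp<IAudioFlinger>& af,
            audio_module_handle_t module,
            audio_devices_t* pDevices, uint32_t* pSamplingRate,
            audio_format_t* pFormat, audio_channel_mask_t* pChannelMask,
            bool lowLatency) {
        const audio_input_flags_t flags =
                lowLatency ? AUDIO_INPUT_FLAG_FAST : AUDIO_INPUT_FLAG_NONE;
        return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask, flags);
    }
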
diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/AudioPolicyEffects.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/AudioPolicyEffects.h b/services/audiopolicy/AudioPolicyEffects.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 33e4397..ed66e58 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -238,7 +238,8 @@
                                         audio_devices_t *pDevices,
                                         uint32_t *pSamplingRate,
                                         audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask) = 0;
+                                        audio_channel_mask_t *pChannelMask,
+                                        audio_input_flags_t flags) = 0;
     // closes an audio input
     virtual status_t closeInput(audio_io_handle_t input) = 0;
     //
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
old mode 100755
new mode 100644
index 5a13ac2..a41721f
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -214,7 +214,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int audioSession)
+                                    int audioSession,
+                                    audio_input_flags_t flags __unused)
 {
     if (mAudioPolicyManager == NULL) {
         return 0;
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
old mode 100755
new mode 100644
index 0a246f2..5ef02e5
--- a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
@@ -202,7 +202,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int audioSession)
+                                    int audioSession,
+                                    audio_input_flags_t flags __unused)
 {
     if (mpAudioPolicy == NULL) {
         return 0;
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index f2320de..8783ec9 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -1111,7 +1111,8 @@
                                     &inputDesc->mDevice,
                                     &inputDesc->mSamplingRate,
                                     &inputDesc->mFormat,
-                                    &inputDesc->mChannelMask);
+                                    &inputDesc->mChannelMask,
+                                    AUDIO_INPUT_FLAG_FAST /*FIXME*/);
 
     // only accept input with the exact requested set of parameters
     if (input == 0 ||
@@ -2332,7 +2333,8 @@
                                                     &inputDesc->mDevice,
                                                     &inputDesc->mSamplingRate,
                                                     &inputDesc->mFormat,
-                                                    &inputDesc->mChannelMask);
+                                                    &inputDesc->mChannelMask,
+                                                    AUDIO_INPUT_FLAG_FAST /*FIXME*/);
 
                 if (input != 0) {
                     for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
@@ -2898,7 +2900,8 @@
                                             &desc->mDevice,
                                             &desc->mSamplingRate,
                                             &desc->mFormat,
-                                            &desc->mChannelMask);
+                                            &desc->mChannelMask,
+                                            AUDIO_INPUT_FLAG_FAST /*FIXME*/);
 
             if (input != 0) {
                 if (!address.isEmpty()) {
@@ -3830,6 +3833,11 @@
         if (!deviceList.isEmpty()) {
             struct audio_patch patch;
             inputDesc->toAudioPortConfig(&patch.sinks[0]);
+            // AUDIO_SOURCE_HOTWORD is for internal use only:
+            // handled as AUDIO_SOURCE_VOICE_RECOGNITION by the audio HAL
+            if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD) {
+                patch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_VOICE_RECOGNITION;
+            }
             patch.num_sinks = 1;
             //only one input device for now
             deviceList.itemAt(0)->toAudioPortConfig(&patch.sources[0]);
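
The added lines above hide AUDIO_SOURCE_HOTWORD from the HAL by presenting it as AUDIO_SOURCE_VOICE_RECOGNITION when the patch sink is built. Pulled out as a helper for clarity, the rule is the one-liner below; the helper name is hypothetical, AudioPolicyManager applies the remap inline.

    // Map a framework-internal source to the source the audio HAL should see.
    static audio_source_t halVisibleSource(audio_source_t source) {
        return (source == AUDIO_SOURCE_HOTWORD) ? AUDIO_SOURCE_VOICE_RECOGNITION : source;
    }
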
@@ -5313,7 +5321,9 @@
 const audio_format_t AudioPolicyManager::AudioPort::sPcmFormatCompareTable[] = {
         AUDIO_FORMAT_DEFAULT,
         AUDIO_FORMAT_PCM_16_BIT,
+        AUDIO_FORMAT_PCM_8_24_BIT,
         AUDIO_FORMAT_PCM_24_BIT_PACKED,
+        AUDIO_FORMAT_PCM_32_BIT,
 };
 
 int AudioPolicyManager::AudioPort::compareFormats(audio_format_t format1,
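
sPcmFormatCompareTable orders PCM formats from least to most preferred, and compareFormats() presumably ranks two formats by their positions in that table. The self-contained sketch below shows that index-based comparison; the enum names and helper are illustrative, and the real AudioPolicyManager implementation may treat unknown or non-PCM formats differently.

    #include <cstdio>

    enum Format { FMT_DEFAULT, FMT_PCM_16, FMT_PCM_8_24, FMT_PCM_24_PACKED, FMT_PCM_32 };

    static const Format kCompareTable[] = {            // least preferred ... most preferred
        FMT_DEFAULT, FMT_PCM_16, FMT_PCM_8_24, FMT_PCM_24_PACKED, FMT_PCM_32,
    };

    static int indexOf(Format f) {
        for (int i = 0; i < (int)(sizeof(kCompareTable) / sizeof(kCompareTable[0])); ++i) {
            if (kCompareTable[i] == f) return i;
        }
        return -1;                                      // unknown formats rank lowest
    }

    // strcmp-like result: negative means format1 ranks below format2
    static int compareFormats(Format f1, Format f2) {
        return indexOf(f1) - indexOf(f2);
    }

    int main() {
        std::printf("%d\n", compareFormats(FMT_PCM_16, FMT_PCM_32) < 0);  // prints 1: 16-bit ranks below 32-bit
        return 0;
    }
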
diff --git a/services/audiopolicy/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h
old mode 100755
new mode 100644
index 380fd5e..08942ee
--- a/services/audiopolicy/AudioPolicyService.h
+++ b/services/audiopolicy/AudioPolicyService.h
@@ -86,10 +86,11 @@
                                 int session = 0);
     virtual void releaseOutput(audio_io_handle_t output);
     virtual audio_io_handle_t getInput(audio_source_t inputSource,
-                                    uint32_t samplingRate = 0,
-                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                    audio_channel_mask_t channelMask = 0,
-                                    int audioSession = 0);
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    int audioSession,
+                                    audio_input_flags_t flags);
     virtual status_t startInput(audio_io_handle_t input);
     virtual status_t stopInput(audio_io_handle_t input);
     virtual void releaseInput(audio_io_handle_t input);
@@ -388,7 +389,8 @@
                                             audio_devices_t *pDevices,
                                             uint32_t *pSamplingRate,
                                             audio_format_t *pFormat,
-                                            audio_channel_mask_t *pChannelMask);
+                                            audio_channel_mask_t *pChannelMask,
+                                            audio_input_flags_t flags);
         // closes an audio input
         virtual status_t closeInput(audio_io_handle_t input);
         //
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 1642896..8075515 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -825,6 +825,7 @@
         }
         outputStreams.push(getZslStreamId());
     } else {
+        mZslProcessor->clearZslQueue();
         mZslProcessor->deleteStream();
     }
 
@@ -906,6 +907,13 @@
                 ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
                         __FUNCTION__, mCameraId, strerror(-res), res);
             }
+            // Clean up recording stream
+            res = mStreamingProcessor->deleteRecordingStream();
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete recording stream before "
+                        "stop preview: %s (%d)",
+                        __FUNCTION__, mCameraId, strerror(-res), res);
+            }
             // no break
         case Parameters::WAITING_FOR_PREVIEW_WINDOW: {
             SharedParameters::Lock l(mParameters);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 3004d3e..9d36bfa 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1794,8 +1794,9 @@
                     return;
                 }
                 isPartialResult = (result->partial_result < mNumPartialResults);
-                request.partialResult.collectedResult.append(
-                    result->result);
+                if (isPartialResult) {
+                    request.partialResult.collectedResult.append(result->result);
+                }
             } else {
                 camera_metadata_ro_entry_t partialResultEntry;
                 res = find_camera_metadata_ro_entry(result->result,