Merge "cameraservice: remove record stream in stopPreview" into lmp-dev
diff --git a/camera/Android.mk b/camera/Android.mk
index c10e38a..bbdb47d 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -36,6 +36,7 @@
 	camera2/CaptureRequest.cpp \
 	ProCamera.cpp \
 	CameraBase.cpp \
+	CameraUtils.cpp \
 	VendorTagDescriptor.cpp
 
 LOCAL_SHARED_LIBRARIES := \
diff --git a/camera/CameraUtils.cpp b/camera/CameraUtils.cpp
new file mode 100644
index 0000000..3ff181d
--- /dev/null
+++ b/camera/CameraUtils.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraUtils"
+//#define LOG_NDEBUG 0
+
+#include <camera/CameraUtils.h>
+
+#include <system/window.h>
+#include <system/graphics.h>
+
+#include <utils/Log.h>
+
+namespace android {
+
+status_t CameraUtils::getRotationTransform(const CameraMetadata& staticInfo,
+                /*out*/int32_t* transform) {
+    ALOGV("%s", __FUNCTION__);
+
+    if (transform == NULL) {
+        ALOGW("%s: null transform", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    *transform = 0;
+
+    camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_SENSOR_ORIENTATION);
+    if (entry.count == 0) {
+        ALOGE("%s: Can't find android.sensor.orientation in static metadata!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    camera_metadata_ro_entry_t entryFacing = staticInfo.find(ANDROID_LENS_FACING);
+    if (entryFacing.count == 0) {
+        ALOGE("%s: Can't find android.lens.facing in static metadata!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    int32_t& flags = *transform;
+
+    bool mirror = (entryFacing.data.u8[0] == ANDROID_LENS_FACING_FRONT);
+    int orientation = entry.data.i32[0];
+    if (!mirror) {
+        switch (orientation) {
+            case 0:
+                flags = 0;
+                break;
+            case 90:
+                flags = NATIVE_WINDOW_TRANSFORM_ROT_90;
+                break;
+            case 180:
+                flags = NATIVE_WINDOW_TRANSFORM_ROT_180;
+                break;
+            case 270:
+                flags = NATIVE_WINDOW_TRANSFORM_ROT_270;
+                break;
+            default:
+                ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
+                      __FUNCTION__, orientation);
+                return INVALID_OPERATION;
+        }
+    } else {
+        switch (orientation) {
+            case 0:
+                flags = HAL_TRANSFORM_FLIP_H;
+                break;
+            case 90:
+                flags = HAL_TRANSFORM_FLIP_H | HAL_TRANSFORM_ROT_90;
+                break;
+            case 180:
+                flags = HAL_TRANSFORM_FLIP_V;
+                break;
+            case 270:
+                flags = HAL_TRANSFORM_FLIP_V | HAL_TRANSFORM_ROT_90;
+                break;
+            default:
+                ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
+                      __FUNCTION__, orientation);
+                return INVALID_OPERATION;
+        }
+
+    }
+
+    /**
+     * This magic flag makes surfaceflinger un-rotate the buffers
+     * to counter the extra global device UI rotation whenever the user
+     * physically rotates the device.
+     *
+     * By doing this, the camera buffer always ends up aligned
+     * with the physical camera for a "see through" effect.
+     *
+     * In essence, the buffer only gets rotated during preview use-cases.
+     * The user is still responsible to re-create streams of the proper
+     * aspect ratio, or the preview will end up looking non-uniformly
+     * stretched.
+     */
+    flags |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
+
+    ALOGV("%s: final transform = 0x%x", __FUNCTION__, flags);
+
+    return OK;
+}
+
+
+} /* namespace android */
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index c016e52..4e36160 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -37,6 +37,7 @@
     parcel->readInt32(&afTriggerId);
     parcel->readInt32(&precaptureTriggerId);
     parcel->readInt64(&frameNumber);
+    parcel->readInt32(&partialResultCount);
 
     return OK;
 }
@@ -52,6 +53,7 @@
     parcel->writeInt32(afTriggerId);
     parcel->writeInt32(precaptureTriggerId);
     parcel->writeInt64(frameNumber);
+    parcel->writeInt32(partialResultCount);
 
     return OK;
 }
diff --git a/include/camera/CameraUtils.h b/include/camera/CameraUtils.h
new file mode 100644
index 0000000..c06f05d
--- /dev/null
+++ b/include/camera/CameraUtils.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_CAMERA_CLIENT_CAMERAUTILS_H
+#define ANDROID_CAMERA_CLIENT_CAMERAUTILS_H
+
+#include <camera/CameraMetadata.h>
+#include <utils/Errors.h>
+
+#include <stdint.h>
+
+namespace android {
+
+/**
+ * CameraUtils contains utility methods that are shared between the native
+ * camera client, and the camera service.
+ */
+class CameraUtils {
+    public:
+        /**
+         * Calculate the ANativeWindow transform from the static camera
+         * metadata.  This is based on the sensor orientation and lens facing
+         * attributes of the camera device.
+         *
+         * Returns OK on success, or a negative error code.
+         */
+        static status_t getRotationTransform(const CameraMetadata& staticInfo,
+                /*out*/int32_t* transform);
+    private:
+        CameraUtils();
+};
+
+} /* namespace android */
+
+#endif /* ANDROID_CAMERA_CLIENT_CAMERAUTILS_H */
+
diff --git a/include/camera/CaptureResult.h b/include/camera/CaptureResult.h
index 6e47a16..0be7d6f 100644
--- a/include/camera/CaptureResult.h
+++ b/include/camera/CaptureResult.h
@@ -53,6 +53,11 @@
     int64_t frameNumber;
 
     /**
+     * The partial result count (index) for this capture result.
+     */
+    int32_t partialResultCount;
+
+    /**
      * Constructor initializes object as invalid by setting requestId to be -1.
      */
     CaptureResultExtras()
@@ -60,7 +65,8 @@
           burstId(0),
           afTriggerId(0),
           precaptureTriggerId(0),
-          frameNumber(0) {
+          frameNumber(0),
+          partialResultCount(0) {
     }
 
     /**
diff --git a/include/media/AudioPolicyHelper.h b/include/media/AudioPolicyHelper.h
new file mode 100644
index 0000000..f4afd45
--- /dev/null
+++ b/include/media/AudioPolicyHelper.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef AUDIO_POLICY_HELPER_H_
+#define AUDIO_POLICY_HELPER_H_
+
+#include <system/audio.h>
+
+// static: this function is defined in a header, so without internal linkage every
+// translation unit that includes AudioPolicyHelper.h would emit an external
+// definition and the link would fail with multiple-definition errors (ODR).
+static audio_stream_type_t audio_attributes_to_stream_type(const audio_attributes_t *attr)
+{
+    // flags to stream type mapping
+    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
+        return AUDIO_STREAM_ENFORCED_AUDIBLE;
+    }
+    if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
+        return AUDIO_STREAM_BLUETOOTH_SCO;
+    }
+
+    // usage to stream type mapping
+    switch (attr->usage) {
+    case AUDIO_USAGE_MEDIA:
+    case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+        return AUDIO_STREAM_MUSIC;
+    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+        return AUDIO_STREAM_SYSTEM;
+    case AUDIO_USAGE_VOICE_COMMUNICATION:
+        return AUDIO_STREAM_VOICE_CALL;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+        return AUDIO_STREAM_DTMF;
+
+    case AUDIO_USAGE_ALARM:
+        return AUDIO_STREAM_ALARM;
+    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+        return AUDIO_STREAM_RING;
+
+    case AUDIO_USAGE_NOTIFICATION:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+    case AUDIO_USAGE_NOTIFICATION_EVENT:
+        return AUDIO_STREAM_NOTIFICATION;
+
+    case AUDIO_USAGE_UNKNOWN:
+    default:
+        return AUDIO_STREAM_MUSIC;
+    }
+}
+
+#endif //AUDIO_POLICY_HELPER_H_
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index f9c7efd..4edc1bf 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -461,6 +461,7 @@
     // for notification APIs
     uint32_t                mNotificationFramesReq; // requested number of frames between each
                                                     // notification callback
+                                                    // as specified in constructor or set()
     uint32_t                mNotificationFramesAct; // actual number of frames between each
                                                     // notification callback
     bool                    mRefreshRemaining;      // processAudioBuffer() should refresh
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index e1aab41..9ea18de 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -234,7 +234,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int sessionId);
+                                    int sessionId,
+                                    audio_input_flags_t);
 
     static status_t startInput(audio_io_handle_t input);
     static status_t stopInput(audio_io_handle_t input);
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index a8f4605..31312d3 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -163,7 +163,8 @@
                                         audio_devices_t *pDevices,
                                         uint32_t *pSamplingRate,
                                         audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask) = 0;
+                                        audio_channel_mask_t *pChannelMask,
+                                        audio_input_flags_t flags) = 0;
     virtual status_t closeInput(audio_io_handle_t input) = 0;
 
     virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 959e4c3..e08b5ae 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -70,10 +70,11 @@
                                 int session = 0) = 0;
     virtual void releaseOutput(audio_io_handle_t output) = 0;
     virtual audio_io_handle_t getInput(audio_source_t inputSource,
-                                    uint32_t samplingRate = 0,
-                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                    audio_channel_mask_t channelMask = 0,
-                                    int audioSession = 0) = 0;
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    int audioSession,
+                                    audio_input_flags_t flags) = 0;
     virtual status_t startInput(audio_io_handle_t input) = 0;
     virtual status_t stopInput(audio_io_handle_t input) = 0;
     virtual void releaseInput(audio_io_handle_t input) = 0;
diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h
index 2dd78cc..5830475 100644
--- a/include/media/SoundPool.h
+++ b/include/media/SoundPool.h
@@ -167,7 +167,7 @@
     friend class SoundPoolThread;
     friend class SoundChannel;
 public:
-    SoundPool(int maxChannels, audio_stream_type_t streamType, int srcQuality);
+    SoundPool(int maxChannels, const audio_attributes_t* pAttributes);
     ~SoundPool();
     int load(const char* url, int priority);
     int load(int fd, int64_t offset, int64_t length, int priority);
@@ -183,8 +183,7 @@
     void setPriority(int channelID, int priority);
     void setLoop(int channelID, int loop);
     void setRate(int channelID, float rate);
-    audio_stream_type_t streamType() const { return mStreamType; }
-    int srcQuality() const { return mSrcQuality; }
+    const audio_attributes_t* attributes() { return &mAttributes; }
 
     // called from SoundPoolThread
     void sampleLoaded(int sampleID);
@@ -225,8 +224,7 @@
     List<SoundChannel*>     mStop;
     DefaultKeyedVector< int, sp<Sample> >   mSamples;
     int                     mMaxChannels;
-    audio_stream_type_t     mStreamType;
-    int                     mSrcQuality;
+    audio_attributes_t      mAttributes;
     int                     mAllocated;
     int                     mNextSampleID;
     int                     mNextChannelID;
diff --git a/include/media/stagefright/MediaBufferGroup.h b/include/media/stagefright/MediaBufferGroup.h
index 0488292..a006f7f 100644
--- a/include/media/stagefright/MediaBufferGroup.h
+++ b/include/media/stagefright/MediaBufferGroup.h
@@ -34,9 +34,12 @@
 
     void add_buffer(MediaBuffer *buffer);
 
-    // Blocks until a buffer is available and returns it to the caller,
-    // the returned buffer will have a reference count of 1.
-    status_t acquire_buffer(MediaBuffer **buffer);
+    // If nonBlocking is false, it blocks until a buffer is available and
+    // passes it to the caller in *buffer, while returning OK.
+    // The returned buffer will have a reference count of 1.
+    // If nonBlocking is true and a buffer is not immediately available,
+    // buffer is set to NULL and it returns WOULD_BLOCK.
+    status_t acquire_buffer(MediaBuffer **buffer, bool nonBlocking = false);
 
 protected:
     virtual void signalBufferReturned(MediaBuffer *buffer);
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index a0ff997..3f7508b 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -73,6 +73,10 @@
     // unconfigured.
     status_t stop();
 
+    // Resets the codec to the INITIALIZED state.  Can be called after an error
+    // has occurred to make the codec usable.
+    status_t reset();
+
     // Client MUST call release before releasing final reference to this
     // object.
     status_t release();
@@ -221,6 +225,11 @@
     sp<AMessage> mInputFormat;
     sp<AMessage> mCallback;
 
+    // initial create parameters
+    AString mInitName;
+    bool mInitNameIsType;
+    bool mInitIsEncoder;
+
     // Used only to synchronize asynchronous getBufferAndFormat
     // across all the other (synchronous) buffer state change
     // operations, such as de/queueIn/OutputBuffer, start and
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index 01a5daf..c11fcc9 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -25,9 +25,12 @@
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/Vector.h>
+#include <utils/StrongPointer.h>
 
 namespace android {
 
+struct AMessage;
+
 struct MediaCodecList {
     static const MediaCodecList *getInstance();
 
@@ -51,15 +54,19 @@
             size_t index, const char *type,
             Vector<ProfileLevel> *profileLevels,
             Vector<uint32_t> *colorFormats,
-            uint32_t *flags) const;
+            uint32_t *flags,
+            // TODO default argument is only for compatibility with existing JNI
+            sp<AMessage> *capabilities = NULL) const;
 
 private:
     enum Section {
         SECTION_TOPLEVEL,
         SECTION_DECODERS,
         SECTION_DECODER,
+        SECTION_DECODER_TYPE,
         SECTION_ENCODERS,
         SECTION_ENCODER,
+        SECTION_ENCODER_TYPE,
         SECTION_INCLUDE,
     };
 
@@ -67,7 +74,10 @@
         AString mName;
         bool mIsEncoder;
         uint32_t mTypes;
+        uint32_t mSoleType;
         uint32_t mQuirks;
+        KeyedVector<uint32_t, sp<AMessage> > mCaps;
+        sp<AMessage> mCurrentCaps;
     };
 
     static MediaCodecList *sCodecList;
@@ -103,6 +113,8 @@
 
     status_t addQuirk(const char **attrs);
     status_t addTypeFromAttributes(const char **attrs);
+    status_t addLimit(const char **attrs);
+    status_t addFeature(const char **attrs);
     void addType(const char *name);
 
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodecList);
diff --git a/include/media/stagefright/MediaSource.h b/include/media/stagefright/MediaSource.h
index 204d1c6..a653db9 100644
--- a/include/media/stagefright/MediaSource.h
+++ b/include/media/stagefright/MediaSource.h
@@ -82,6 +82,10 @@
         void setLateBy(int64_t lateness_us);
         int64_t getLateBy() const;
 
+        void setNonBlocking();
+        void clearNonBlocking();
+        bool getNonBlocking() const;
+
     private:
         enum Options {
             kSeekTo_Option      = 1,
@@ -91,6 +95,7 @@
         int64_t mSeekTimeUs;
         SeekMode mSeekMode;
         int64_t mLatenessUs;
+        bool mNonBlocking;
     };
 
     // Causes this source to suspend pulling data from its upstream source
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index d38d976..087d016 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -166,6 +166,13 @@
     kKeyCryptoDefaultIVSize = 'cryS',  // int32_t
 
     kKeyPssh              = 'pssh',  // raw data
+
+    // Please see MediaFormat.KEY_IS_AUTOSELECT.
+    kKeyTrackIsAutoselect = 'auto', // bool (int32_t)
+    // Please see MediaFormat.KEY_IS_DEFAULT.
+    kKeyTrackIsDefault    = 'dflt', // bool (int32_t)
+    // Similar to MediaFormat.KEY_IS_FORCED_SUBTITLE but pertains to av tracks as well.
+    kKeyTrackIsForced     = 'frcd', // bool (int32_t)
 };
 
 enum {
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index 7e823eb..5846d6b 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -50,6 +50,7 @@
     void setDouble(const char *name, double value);
     void setPointer(const char *name, void *value);
     void setString(const char *name, const char *s, ssize_t len = -1);
+    void setString(const char *name, const AString &s);
     void setObject(const char *name, const sp<RefBase> &obj);
     void setBuffer(const char *name, const sp<ABuffer> &buffer);
     void setMessage(const char *name, const sp<AMessage> &obj);
@@ -58,6 +59,8 @@
             const char *name,
             int32_t left, int32_t top, int32_t right, int32_t bottom);
 
+    bool contains(const char *name) const;
+
     bool findInt32(const char *name, int32_t *value) const;
     bool findInt64(const char *name, int64_t *value) const;
     bool findSize(const char *name, size_t *value) const;
diff --git a/include/media/stagefright/foundation/AString.h b/include/media/stagefright/foundation/AString.h
index 0edaa1c..4be3c6d 100644
--- a/include/media/stagefright/foundation/AString.h
+++ b/include/media/stagefright/foundation/AString.h
@@ -70,6 +70,9 @@
     size_t hash() const;
 
     bool operator==(const AString &other) const;
+    bool operator!=(const AString &other) const {
+        return !operator==(other);
+    }
     bool operator<(const AString &other) const;
     bool operator>(const AString &other) const;
 
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 38ee82b..6c2cbe3 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -223,6 +223,8 @@
         pContext->pBundledContext->bBassTempDisabled        = LVM_FALSE;
         pContext->pBundledContext->bVirtualizerEnabled      = LVM_FALSE;
         pContext->pBundledContext->bVirtualizerTempDisabled = LVM_FALSE;
+        pContext->pBundledContext->nOutputDevice            = AUDIO_DEVICE_NONE;
+        pContext->pBundledContext->nVirtualizerForcedDevice = AUDIO_DEVICE_NONE;
         pContext->pBundledContext->NumberEffectsEnabled     = 0;
         pContext->pBundledContext->NumberEffectsCalled      = 0;
         pContext->pBundledContext->firstVolume              = LVM_TRUE;
@@ -1166,6 +1168,177 @@
     //ALOGV("\tVirtualizerSetStrength Succesfully called LVM_SetControlParameters\n\n");
 }    /* end setStrength */
 
+//----------------------------------------------------------------------------
+// VirtualizerIsDeviceSupported()
+//----------------------------------------------------------------------------
+// Purpose:
+// Check if an audio device type is supported by this implementation
+//
+// Inputs:
+//  deviceType   the type of device that affects the processing (e.g. for binaural vs transaural)
+// Output:
+//  -EINVAL      if the configuration is not supported or it is unknown
+//  0            if the configuration is supported
+//----------------------------------------------------------------------------
+int VirtualizerIsDeviceSupported(audio_devices_t deviceType) {
+    switch (deviceType) {
+    case AUDIO_DEVICE_OUT_WIRED_HEADSET:
+    case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
+    case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
+        return 0;
+    default :
+        return -EINVAL;
+    }
+}
+
+//----------------------------------------------------------------------------
+// VirtualizerIsConfigurationSupported()
+//----------------------------------------------------------------------------
+// Purpose:
+// Check if a channel mask + audio device type is supported by this implementation
+//
+// Inputs:
+//  channelMask  the channel mask of the input to virtualize
+//  deviceType   the type of device that affects the processing (e.g. for binaural vs transaural)
+// Output:
+//  -EINVAL      if the configuration is not supported or it is unknown
+//  0            if the configuration is supported
+//----------------------------------------------------------------------------
+int VirtualizerIsConfigurationSupported(audio_channel_mask_t channelMask,
+        audio_devices_t deviceType) {
+    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+    if ((channelCount == 0) || (channelCount > 2)) {
+        return -EINVAL;
+    }
+
+    return VirtualizerIsDeviceSupported(deviceType);
+}
+
+//----------------------------------------------------------------------------
+// VirtualizerForceVirtualizationMode()
+//----------------------------------------------------------------------------
+// Purpose:
+// Force the virtualization mode to that of the given audio device
+//
+// Inputs:
+//  pContext     effect engine context
+//  forcedDevice the type of device whose virtualization mode we'll always use
+// Output:
+//  -EINVAL      if the device is not supported or is unknown
+//  0            if the device is supported and the virtualization mode forced
+//
+//----------------------------------------------------------------------------
+int VirtualizerForceVirtualizationMode(EffectContext *pContext, audio_devices_t forcedDevice) {
+    ALOGV("VirtualizerForceVirtualizationMode: forcedDev=0x%x enabled=%d tmpDisabled=%d",
+            forcedDevice, pContext->pBundledContext->bVirtualizerEnabled,
+            pContext->pBundledContext->bVirtualizerTempDisabled);
+    int status = 0;
+    bool useVirtualizer = false;
+
+    if (VirtualizerIsDeviceSupported(forcedDevice) != 0) {
+        // forced device is not supported, make it behave as a reset of forced mode
+        forcedDevice = AUDIO_DEVICE_NONE;
+        // but return an error
+        status = -EINVAL;
+    }
+
+    if (forcedDevice == AUDIO_DEVICE_NONE) {
+        // disabling forced virtualization mode:
+        // verify whether the virtualization should be enabled or disabled
+        if (VirtualizerIsDeviceSupported(pContext->pBundledContext->nOutputDevice) == 0) {
+            useVirtualizer = (pContext->pBundledContext->bVirtualizerEnabled == LVM_TRUE);
+        }
+        pContext->pBundledContext->nVirtualizerForcedDevice = AUDIO_DEVICE_NONE;
+    } else {
+        // forcing virtualization mode: record the actual (validated) forced device so
+        // VirtualizerGetVirtualizationMode() reports it back correctly
+        pContext->pBundledContext->nVirtualizerForcedDevice = forcedDevice;
+        // only enable for a supported mode, when the effect is enabled
+        useVirtualizer = (pContext->pBundledContext->bVirtualizerEnabled == LVM_TRUE);
+    }
+
+    if (useVirtualizer) {
+        if (pContext->pBundledContext->bVirtualizerTempDisabled == LVM_TRUE) {
+            ALOGV("\tVirtualizerForceVirtualizationMode re-enable LVM_VIRTUALIZER");
+            android::LvmEffect_enable(pContext);
+            pContext->pBundledContext->bVirtualizerTempDisabled = LVM_FALSE;
+        } else {
+            ALOGV("\tVirtualizerForceVirtualizationMode leaving LVM_VIRTUALIZER enabled");
+        }
+    } else {
+        if (pContext->pBundledContext->bVirtualizerTempDisabled == LVM_FALSE) {
+            ALOGV("\tVirtualizerForceVirtualizationMode disable LVM_VIRTUALIZER");
+            android::LvmEffect_disable(pContext);
+            pContext->pBundledContext->bVirtualizerTempDisabled = LVM_TRUE;
+        } else {
+            ALOGV("\tVirtualizerForceVirtualizationMode leaving LVM_VIRTUALIZER disabled");
+        }
+    }
+
+    ALOGV("\tafter VirtualizerForceVirtualizationMode: enabled=%d tmpDisabled=%d",
+            pContext->pBundledContext->bVirtualizerEnabled,
+            pContext->pBundledContext->bVirtualizerTempDisabled);
+
+    return status;
+}
+//----------------------------------------------------------------------------
+// VirtualizerGetSpeakerAngles()
+//----------------------------------------------------------------------------
+// Purpose:
+// Get the virtual speaker angles for a channel mask + audio device type
+// configuration which is guaranteed to be supported by this implementation
+//
+// Inputs:
+//  channelMask:   the channel mask of the input to virtualize
+//  deviceType     the type of device that affects the processing (e.g. for binaural vs transaural)
+// Input/Output:
+//  pSpeakerAngles the array of integer where each speaker angle is written as a triplet in the
+//                 following format:
+//                    int32_t a bit mask with a single value selected for each speaker, following
+//                            the convention of the audio_channel_mask_t type
+//                    int32_t a value in degrees expressing the speaker azimuth, where 0 is in front
+//                            of the user, 180 behind, -90 to the left, 90 to the right of the user
+//                    int32_t a value in degrees expressing the speaker elevation, where 0 is the
+//                            horizontal plane, +90 is directly above the user, -90 below
+//
+//----------------------------------------------------------------------------
+void VirtualizerGetSpeakerAngles(audio_channel_mask_t channelMask __unused,
+        audio_devices_t deviceType __unused, int32_t *pSpeakerAngles) {
+    // the channel count is guaranteed to be 1 or 2
+    // the device is guaranteed to be of type headphone
+    // this virtualizer is always 2in with speakers at -90 and 90deg of azimuth, 0deg of elevation
+    *pSpeakerAngles++ = (int32_t) AUDIO_CHANNEL_OUT_FRONT_LEFT;
+    *pSpeakerAngles++ = -90; // azimuth
+    *pSpeakerAngles++ = 0;   // elevation
+    *pSpeakerAngles++ = (int32_t) AUDIO_CHANNEL_OUT_FRONT_RIGHT;
+    *pSpeakerAngles++ = 90;  // azimuth
+    *pSpeakerAngles   = 0;   // elevation
+}
+
+//----------------------------------------------------------------------------
+// VirtualizerGetVirtualizationMode()
+//----------------------------------------------------------------------------
+// Purpose:
+// Retrieve the current device whose processing mode is used by this effect
+//
+// Output:
+//   AUDIO_DEVICE_NONE if the effect is not virtualizing
+//   or the device type if the effect is virtualizing
+//----------------------------------------------------------------------------
+audio_devices_t VirtualizerGetVirtualizationMode(EffectContext *pContext) {
+    audio_devices_t virtDevice = AUDIO_DEVICE_NONE;
+    if ((pContext->pBundledContext->bVirtualizerEnabled == LVM_TRUE)
+            && (pContext->pBundledContext->bVirtualizerTempDisabled == LVM_FALSE)) {
+        if (pContext->pBundledContext->nVirtualizerForcedDevice != AUDIO_DEVICE_NONE) {
+            // virtualization mode is forced, return that device
+            virtDevice = pContext->pBundledContext->nVirtualizerForcedDevice;
+        } else {
+            // no forced mode, return the current device
+            virtDevice = pContext->pBundledContext->nOutputDevice;
+        }
+    }
+    ALOGV("VirtualizerGetVirtualizationMode() returning 0x%x", virtDevice);
+    return virtDevice;
+}
 
 //----------------------------------------------------------------------------
 // EqualizerLimitBandLevels()
@@ -1903,7 +2076,17 @@
             }
             *pValueSize = sizeof(int16_t);
             break;
-
+        case VIRTUALIZER_PARAM_VIRTUAL_SPEAKER_ANGLES:
+            // return value size can only be interpreted as relative to input value,
+            // deferring validity check to below
+            break;
+        case VIRTUALIZER_PARAM_VIRTUALIZATION_MODE:
+            if (*pValueSize != sizeof(uint32_t)){
+                ALOGV("\tLVM_ERROR : Virtualizer_getParameter() invalid pValueSize %d",*pValueSize);
+                return -EINVAL;
+            }
+            *pValueSize = sizeof(uint32_t);
+            break;
         default:
             ALOGV("\tLVM_ERROR : Virtualizer_getParameter() invalid param %d", param);
             return -EINVAL;
@@ -1924,13 +2107,36 @@
             //        *(int16_t *)pValue);
             break;
 
+        case VIRTUALIZER_PARAM_VIRTUAL_SPEAKER_ANGLES: {
+            const audio_channel_mask_t channelMask = (audio_channel_mask_t) *pParamTemp++;
+            const audio_devices_t deviceType = (audio_devices_t) *pParamTemp;
+            uint32_t nbChannels = audio_channel_count_from_out_mask(channelMask);
+            if (*pValueSize < 3 * nbChannels * sizeof(int32_t)){
+                ALOGV("\tLVM_ERROR : Virtualizer_getParameter() invalid pValueSize %d",*pValueSize);
+                return -EINVAL;
+            }
+            // verify the configuration is supported
+            status = VirtualizerIsConfigurationSupported(channelMask, deviceType);
+            if (status == 0) {
+                ALOGV("VIRTUALIZER_PARAM_VIRTUAL_SPEAKER_ANGLES supports mask=0x%x device=0x%x",
+                        channelMask, deviceType);
+                // configuration is supported, get the angles
+                VirtualizerGetSpeakerAngles(channelMask, deviceType, (int32_t *)pValue);
+            }
+            }
+            break;
+
+        case VIRTUALIZER_PARAM_VIRTUALIZATION_MODE:
+            *(uint32_t *)pValue  = (uint32_t) VirtualizerGetVirtualizationMode(pContext);
+            break;
+
         default:
             ALOGV("\tLVM_ERROR : Virtualizer_getParameter() invalid param %d", param);
             status = -EINVAL;
             break;
     }
 
-    //ALOGV("\tVirtualizer_getParameter end");
+    ALOGV("\tVirtualizer_getParameter end returning status=%d", status);
     return status;
 } /* end Virtualizer_getParameter */
 
@@ -1965,6 +2171,15 @@
             VirtualizerSetStrength(pContext, (int32_t)strength);
             //ALOGV("\tVirtualizer_setParameter() Called pVirtualizer->setStrength");
            break;
+
+        case VIRTUALIZER_PARAM_FORCE_VIRTUALIZATION_MODE: {
+            const audio_devices_t deviceType = *(audio_devices_t *) pValue;
+            status = VirtualizerForceVirtualizationMode(pContext, deviceType);
+            //ALOGV("VIRTUALIZER_PARAM_FORCE_VIRTUALIZATION_MODE device=0x%x result=%d",
+            //        deviceType, status);
+            }
+            break;
+
         default:
             ALOGV("\tLVM_ERROR : Virtualizer_setParameter() invalid param %d", param);
             break;
@@ -2865,7 +3080,6 @@
                                                               (void *)p->data,
                                                               &p->vsize,
                                                               p->data + voffset);
-
                 *replySize = sizeof(effect_param_t) + voffset + p->vsize;
 
                 //ALOGV("\tVirtualizer_command EFFECT_CMD_GET_PARAM "
@@ -2976,14 +3190,17 @@
                                                                     p->data + p->psize);
             }
             if(pContext->EffectType == LVM_VIRTUALIZER){
+              // Warning: this log would misread an int32_t value, since it assumes int16_t
               //ALOGV("\tVirtualizer_command EFFECT_CMD_SET_PARAM param %d, *replySize %d, value %d",
               //        *(int32_t *)((char *)pCmdData + sizeof(effect_param_t)),
               //        *replySize,
               //        *(int16_t *)((char *)pCmdData + sizeof(effect_param_t) + sizeof(int32_t)));
 
-                if (pCmdData   == NULL||
-                    cmdSize    != (sizeof(effect_param_t) + sizeof(int32_t) +sizeof(int16_t))||
-                    pReplyData == NULL||
+                if (pCmdData   == NULL ||
+                    // legal parameters are int16_t or int32_t
+                    cmdSize    > (sizeof(effect_param_t) + sizeof(int32_t) +sizeof(int32_t)) ||
+                    cmdSize    < (sizeof(effect_param_t) + sizeof(int32_t) +sizeof(int16_t)) ||
+                    pReplyData == NULL ||
                     *replySize != sizeof(int32_t)){
                     ALOGV("\tLVM_ERROR : Virtualizer_command cmdCode Case: "
                             "EFFECT_CMD_SET_PARAM: ERROR");
@@ -3075,6 +3292,7 @@
         {
             ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_SET_DEVICE start");
             uint32_t device = *(uint32_t *)pCmdData;
+            pContext->pBundledContext->nOutputDevice = (audio_devices_t) device;
 
             if (pContext->EffectType == LVM_BASS_BOOST) {
                 if((device == AUDIO_DEVICE_OUT_SPEAKER) ||
@@ -3110,37 +3328,38 @@
                 }
             }
             if (pContext->EffectType == LVM_VIRTUALIZER) {
-                if((device == AUDIO_DEVICE_OUT_SPEAKER)||
-                        (device == AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT)||
-                        (device == AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER)){
-                    ALOGV("\tEFFECT_CMD_SET_DEVICE device is invalid for LVM_VIRTUALIZER %d",
-                          *(int32_t *)pCmdData);
-                    ALOGV("\tEFFECT_CMD_SET_DEVICE temporary disable LVM_VIRTUALIZER");
+                if (pContext->pBundledContext->nVirtualizerForcedDevice == AUDIO_DEVICE_NONE) {
+                    // default case unless configuration is forced
+                    if (android::VirtualizerIsDeviceSupported(device) != 0) {
+                        ALOGV("\tEFFECT_CMD_SET_DEVICE device is invalid for LVM_VIRTUALIZER %d",
+                                *(int32_t *)pCmdData);
+                        ALOGV("\tEFFECT_CMD_SET_DEVICE temporary disable LVM_VIRTUALIZER");
 
-                    //If a device doesnt support virtualizer the effect must be temporarily disabled
-                    // the effect must still report its original state as this can only be changed
-                    // by the ENABLE/DISABLE command
+                        // If a device doesn't support virtualizer, the effect must be temporarily
+                        // disabled. The effect must still report its original state, as this can
+                        // only be changed by the ENABLE/DISABLE command.
 
-                    if (pContext->pBundledContext->bVirtualizerEnabled == LVM_TRUE) {
-                        ALOGV("\tEFFECT_CMD_SET_DEVICE disable LVM_VIRTUALIZER %d",
-                              *(int32_t *)pCmdData);
-                        android::LvmEffect_disable(pContext);
+                        if (pContext->pBundledContext->bVirtualizerEnabled == LVM_TRUE) {
+                            ALOGV("\tEFFECT_CMD_SET_DEVICE disable LVM_VIRTUALIZER %d",
+                                    *(int32_t *)pCmdData);
+                            android::LvmEffect_disable(pContext);
+                        }
+                        pContext->pBundledContext->bVirtualizerTempDisabled = LVM_TRUE;
+                    } else {
+                        ALOGV("\tEFFECT_CMD_SET_DEVICE device is valid for LVM_VIRTUALIZER %d",
+                                *(int32_t *)pCmdData);
+
+                        // If a device supports virtualizer and the effect has been temporarily
+                        // disabled previously then re-enable it
+
+                        if(pContext->pBundledContext->bVirtualizerEnabled == LVM_TRUE){
+                            ALOGV("\tEFFECT_CMD_SET_DEVICE re-enable LVM_VIRTUALIZER %d",
+                                    *(int32_t *)pCmdData);
+                            android::LvmEffect_enable(pContext);
+                        }
+                        pContext->pBundledContext->bVirtualizerTempDisabled = LVM_FALSE;
                     }
-                    pContext->pBundledContext->bVirtualizerTempDisabled = LVM_TRUE;
-                } else {
-                    ALOGV("\tEFFECT_CMD_SET_DEVICE device is valid for LVM_VIRTUALIZER %d",
-                          *(int32_t *)pCmdData);
-
-                    // If a device supports virtualizer and the effect has been temporarily disabled
-                    // previously then re-enable it
-
-                    if(pContext->pBundledContext->bVirtualizerEnabled == LVM_TRUE){
-                        ALOGV("\tEFFECT_CMD_SET_DEVICE re-enable LVM_VIRTUALIZER %d",
-                              *(int32_t *)pCmdData);
-                        android::LvmEffect_enable(pContext);
-                    }
-                    pContext->pBundledContext->bVirtualizerTempDisabled = LVM_FALSE;
-                }
+                } // else virtualization mode is forced to a certain device, nothing to do
             }
             ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_SET_DEVICE end");
             break;
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
index 330bb32..420f973 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
@@ -73,6 +73,8 @@
     bool                            bBassTempDisabled;        /* Flag for Bass to be re-enabled */
     bool                            bVirtualizerEnabled;      /* Flag for Virtualizer */
     bool                            bVirtualizerTempDisabled; /* Flag for effect to be re-enabled */
+    audio_devices_t                 nOutputDevice;            /* Output device for the effect */
+    audio_devices_t                 nVirtualizerForcedDevice; /* Forced device virtualization mode*/
     int                             NumberEffectsEnabled;     /* Effects in this session */
     int                             NumberEffectsCalled;      /* Effects called so far */
     bool                            firstVolume;              /* No smoothing on first Vol change */
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 3ee5809..80c8c5e 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -211,7 +211,7 @@
     mReqFrameCount = frameCount;
 
     mNotificationFramesReq = notificationFrames;
-    mNotificationFramesAct = 0;
+    // mNotificationFramesAct is initialized in openRecord_l
 
     if (sessionId == AUDIO_SESSION_ALLOCATE) {
         mSessionId = AudioSystem::newAudioSessionId();
@@ -444,60 +444,25 @@
         }
     }
 
-    // FIXME Assume double buffering, because we don't know the true HAL sample rate
-    const uint32_t nBuffering = 2;
-
-    mNotificationFramesAct = mNotificationFramesReq;
-    size_t frameCount = mReqFrameCount;
-
-    if (!(mFlags & AUDIO_INPUT_FLAG_FAST)) {
-        // validate framecount
-        // If fast track was not requested, this preserves
-        // the old behavior of validating on client side.
-        // FIXME Eventually the validation should be done on server side
-        // regardless of whether it's a fast or normal track.  It's debatable
-        // whether to account for the input latency to provision buffers appropriately.
-        size_t minFrameCount;
-        status = AudioRecord::getMinFrameCount(&minFrameCount,
-                mSampleRate, mFormat, mChannelMask);
-        if (status != NO_ERROR) {
-            ALOGE("getMinFrameCount() failed for sampleRate %u, format %#x, channelMask %#x; "
-                    "status %d",
-                    mSampleRate, mFormat, mChannelMask, status);
-            return status;
-        }
-
-        if (frameCount == 0) {
-            frameCount = minFrameCount;
-        } else if (frameCount < minFrameCount) {
-            ALOGE("frameCount %zu < minFrameCount %zu", frameCount, minFrameCount);
-            return BAD_VALUE;
-        }
-
-        // Make sure that application is notified with sufficient margin before overrun
-        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
-            mNotificationFramesAct = frameCount/2;
-        }
-    }
-
     audio_io_handle_t input = AudioSystem::getInput(mInputSource, mSampleRate, mFormat,
-            mChannelMask, mSessionId);
+            mChannelMask, mSessionId, mFlags);
     if (input == AUDIO_IO_HANDLE_NONE) {
         ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
-              "channel mask %#x, session %d",
-              mInputSource, mSampleRate, mFormat, mChannelMask, mSessionId);
+              "channel mask %#x, session %d, flags %#x",
+              mInputSource, mSampleRate, mFormat, mChannelMask, mSessionId, mFlags);
         return BAD_VALUE;
     }
     {
     // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
     // we must release it ourselves if anything goes wrong.
 
+    size_t frameCount = mReqFrameCount;
     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                 // but we will still need the original value also
     int originalSessionId = mSessionId;
 
     // The notification frame count is the period between callbacks, as suggested by the server.
-    size_t notificationFrames;
+    size_t notificationFrames = mNotificationFramesReq;
 
     sp<IMemory> iMem;           // for cblk
     sp<IMemory> bufferMem;
@@ -576,14 +541,14 @@
             // once denied, do not request again if IAudioRecord is re-created
             mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
         }
-        // Theoretically double-buffering is not required for fast tracks,
-        // due to tighter scheduling.  But in practice, to accomodate kernels with
-        // scheduling jitter, and apps with computation jitter, we use double-buffering.
-        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
-            mNotificationFramesAct = frameCount/nBuffering;
-        }
     }
 
+    // Make sure that application is notified with sufficient margin before overrun
+    if (notificationFrames == 0 || notificationFrames > frameCount) {
+        ALOGW("Received notificationFrames %zu for frameCount %zu", notificationFrames, frameCount);
+    }
+    mNotificationFramesAct = notificationFrames;
+
     // We retain a copy of the I/O handle, but don't own the reference
     mInput = input;
     mRefreshRemaining = true;
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index a47d45c..fd5824b 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -688,11 +688,12 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int sessionId)
+                                    int sessionId,
+                                    audio_input_flags_t flags)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return 0;
-    return aps->getInput(inputSource, samplingRate, format, channelMask, sessionId);
+    return aps->getInput(inputSource, samplingRate, format, channelMask, sessionId, flags);
 }
 
 status_t AudioSystem::startInput(audio_io_handle_t input)
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 5cf42f7..bd7ea46 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -197,6 +197,7 @@
             lSessionId = *sessionId;
         }
         data.writeInt32(lSessionId);
+        data.writeInt64(notificationFrames != NULL ? *notificationFrames : 0);
         cblk.clear();
         buffers.clear();
         status_t lStatus = remote()->transact(OPEN_RECORD, data, &reply);
@@ -532,7 +533,8 @@
                                         audio_devices_t *pDevices,
                                         uint32_t *pSamplingRate,
                                         audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask)
+                                        audio_channel_mask_t *pChannelMask,
+                                        audio_input_flags_t flags)
     {
         Parcel data, reply;
         audio_devices_t devices = pDevices != NULL ? *pDevices : AUDIO_DEVICE_NONE;
@@ -547,6 +549,7 @@
         data.writeInt32(samplingRate);
         data.writeInt32(format);
         data.writeInt32(channelMask);
+        data.writeInt32(flags);
         remote()->transact(OPEN_INPUT, data, &reply);
         audio_io_handle_t input = (audio_io_handle_t) reply.readInt32();
         devices = (audio_devices_t)reply.readInt32();
@@ -964,7 +967,7 @@
             track_flags_t flags = (track_flags_t) data.readInt32();
             pid_t tid = (pid_t) data.readInt32();
             int sessionId = data.readInt32();
-            size_t notificationFrames = 0;
+            size_t notificationFrames = data.readInt64();
             sp<IMemory> cblk;
             sp<IMemory> buffers;
             status_t status;
@@ -1157,12 +1160,14 @@
             uint32_t samplingRate = data.readInt32();
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = (audio_channel_mask_t)data.readInt32();
+            audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
 
             audio_io_handle_t input = openInput(module,
                                              &devices,
                                              &samplingRate,
                                              &format,
-                                             &channelMask);
+                                             &channelMask,
+                                             flags);
             reply->writeInt32((int32_t) input);
             reply->writeInt32(devices);
             reply->writeInt32(samplingRate);
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 41a9065..40dfb58 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -225,7 +225,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int audioSession)
+                                    int audioSession,
+                                    audio_input_flags_t flags)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -234,6 +235,7 @@
         data.writeInt32(static_cast <uint32_t>(format));
         data.writeInt32(channelMask);
         data.writeInt32(audioSession);
+        data.writeInt32(flags);
         remote()->transact(GET_INPUT, data, &reply);
         return static_cast <audio_io_handle_t> (reply.readInt32());
     }
@@ -707,11 +709,13 @@
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
             int audioSession = data.readInt32();
+            audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
             audio_io_handle_t input = getInput(inputSource,
                                                samplingRate,
                                                format,
                                                channelMask,
-                                               audioSession);
+                                               audioSession,
+                                               flags);
             reply->writeInt32(static_cast <int>(input));
             return NO_ERROR;
         } break;
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index 2aa0592..d2e381b 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -28,6 +28,7 @@
 #include <media/mediaplayer.h>
 #include <media/SoundPool.h>
 #include "SoundPoolThread.h"
+#include <media/AudioPolicyHelper.h>
 
 namespace android
 {
@@ -39,10 +40,10 @@
 size_t kDefaultHeapSize = 1024 * 1024; // 1MB
 
 
-SoundPool::SoundPool(int maxChannels, audio_stream_type_t streamType, int srcQuality)
+SoundPool::SoundPool(int maxChannels, const audio_attributes_t* pAttributes)
 {
-    ALOGV("SoundPool constructor: maxChannels=%d, streamType=%d, srcQuality=%d",
-            maxChannels, streamType, srcQuality);
+    ALOGV("SoundPool constructor: maxChannels=%d, attr.usage=%d, attr.flags=0x%x, attr.tags=%s",
+            maxChannels, pAttributes->usage, pAttributes->flags, pAttributes->tags);
 
     // check limits
     mMaxChannels = maxChannels;
@@ -56,8 +57,7 @@
 
     mQuit = false;
     mDecodeThread = 0;
-    mStreamType = streamType;
-    mSrcQuality = srcQuality;
+    memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
     mAllocated = 0;
     mNextSampleID = 0;
     mNextChannelID = 0;
@@ -580,7 +580,7 @@
         // initialize track
         size_t afFrameCount;
         uint32_t afSampleRate;
-        audio_stream_type_t streamType = mSoundPool->streamType();
+        audio_stream_type_t streamType = audio_attributes_to_stream_type(mSoundPool->attributes());
         if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
             afFrameCount = kDefaultFrameCount;
         }
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 388f77a..d75408d 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -28,6 +28,7 @@
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
+#include "../../libstagefright/include/WVMExtractor.h"
 
 namespace android {
 
@@ -35,10 +36,16 @@
         const sp<AMessage> &notify,
         const sp<IMediaHTTPService> &httpService,
         const char *url,
-        const KeyedVector<String8, String8> *headers)
+        const KeyedVector<String8, String8> *headers,
+        bool isWidevine,
+        bool uidValid,
+        uid_t uid)
     : Source(notify),
       mDurationUs(0ll),
-      mAudioIsVorbis(false) {
+      mAudioIsVorbis(false),
+      mIsWidevine(isWidevine),
+      mUIDValid(uidValid),
+      mUID(uid) {
     DataSource::RegisterDefaultSniffers();
 
     sp<DataSource> dataSource =
@@ -63,7 +70,31 @@
 
 void NuPlayer::GenericSource::initFromDataSource(
         const sp<DataSource> &dataSource) {
-    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
+    sp<MediaExtractor> extractor;
+
+    if (mIsWidevine) {
+        String8 mimeType;
+        float confidence;
+        sp<AMessage> dummy;
+        bool success;
+
+        success = SniffWVM(dataSource, &mimeType, &confidence, &dummy);
+        if (!success
+                || strcasecmp(
+                    mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
+            ALOGE("unsupported widevine mime: %s", mimeType.string());
+            return;
+        }
+
+        sp<WVMExtractor> wvmExtractor = new WVMExtractor(dataSource);
+        wvmExtractor->setAdaptiveStreamingMode(true);
+        if (mUIDValid) {
+            wvmExtractor->setUID(mUID);
+        }
+        extractor = wvmExtractor;
+    } else {
+        extractor = MediaExtractor::Create(dataSource);
+    }
 
     CHECK(extractor != NULL);
 
@@ -81,11 +112,12 @@
         const char *mime;
         CHECK(meta->findCString(kKeyMIMEType, &mime));
 
-        sp<MediaSource> track;
+        sp<MediaSource> track = extractor->getTrack(i);
 
         if (!strncasecmp(mime, "audio/", 6)) {
             if (mAudioTrack.mSource == NULL) {
-                mAudioTrack.mSource = track = extractor->getTrack(i);
+                mAudioTrack.mIndex = i;
+                mAudioTrack.mSource = track;
 
                 if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
                     mAudioIsVorbis = true;
@@ -95,11 +127,13 @@
             }
         } else if (!strncasecmp(mime, "video/", 6)) {
             if (mVideoTrack.mSource == NULL) {
-                mVideoTrack.mSource = track = extractor->getTrack(i);
+                mVideoTrack.mIndex = i;
+                mVideoTrack.mSource = track;
             }
         }
 
         if (track != NULL) {
+            mSources.push(track);
             int64_t durationUs;
             if (meta->findInt64(kKeyDuration, &durationUs)) {
                 if (durationUs > mDurationUs) {
@@ -110,6 +144,13 @@
     }
 }
 
+status_t NuPlayer::GenericSource::setBuffers(bool audio, Vector<MediaBuffer *> &buffers) {
+    if (mIsWidevine && !audio) {
+        return mVideoTrack.mSource->setBuffers(buffers);
+    }
+    return INVALID_OPERATION;
+}
+
 NuPlayer::GenericSource::~GenericSource() {
 }
 
@@ -125,7 +166,8 @@
     }
 
     notifyFlagsChanged(
-            FLAG_CAN_PAUSE
+            (mIsWidevine ? FLAG_SECURE : 0)
+            | FLAG_CAN_PAUSE
             | FLAG_CAN_SEEK_BACKWARD
             | FLAG_CAN_SEEK_FORWARD
             | FLAG_CAN_SEEK);
@@ -177,9 +219,14 @@
         return -EWOULDBLOCK;
     }
 
+    if (mIsWidevine && !audio) {
+        // try to read a buffer, as we may not have been able to read one the last time
+        readBuffer(audio, -1ll);
+    }
+
     status_t finalResult;
     if (!track->mPackets->hasBufferAvailable(&finalResult)) {
-        return finalResult == OK ? -EWOULDBLOCK : finalResult;
+        return (finalResult == OK ? -EWOULDBLOCK : finalResult);
     }
 
     status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
@@ -194,6 +241,56 @@
     return OK;
 }
 
+size_t NuPlayer::GenericSource::getTrackCount() const {
+    return mSources.size();
+}
+
+sp<AMessage> NuPlayer::GenericSource::getTrackInfo(size_t trackIndex) const {
+    size_t trackCount = mSources.size();
+    if (trackIndex >= trackCount) {
+        return NULL;
+    }
+
+    sp<AMessage> format = new AMessage();
+    sp<MetaData> meta = mSources.itemAt(trackIndex)->getFormat();
+
+    const char *mime;
+    CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+    int32_t trackType;
+    if (!strncasecmp(mime, "video/", 6)) {
+        trackType = MEDIA_TRACK_TYPE_VIDEO;
+    } else if (!strncasecmp(mime, "audio/", 6)) {
+        trackType = MEDIA_TRACK_TYPE_AUDIO;
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) {
+        trackType = MEDIA_TRACK_TYPE_TIMEDTEXT;
+    } else {
+        trackType = MEDIA_TRACK_TYPE_UNKNOWN;
+    }
+    format->setInt32("type", trackType);
+
+    const char *lang;
+    if (!meta->findCString(kKeyMediaLanguage, &lang)) {
+        lang = "und";
+    }
+    format->setString("language", lang);
+
+    if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
+        format->setString("mime", mime);
+
+        int32_t isAutoselect = 1, isDefault = 0, isForced = 0;
+        meta->findInt32(kKeyTrackIsAutoselect, &isAutoselect);
+        meta->findInt32(kKeyTrackIsDefault, &isDefault);
+        meta->findInt32(kKeyTrackIsForced, &isForced);
+
+        format->setInt32("auto", !!isAutoselect);
+        format->setInt32("default", !!isDefault);
+        format->setInt32("forced", !!isForced);
+    }
+
+    return format;
+}
+
 status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
     if (mVideoTrack.mSource != NULL) {
         int64_t actualTimeUs;
@@ -227,6 +324,10 @@
         seeking = true;
     }
 
+    if (mIsWidevine && !audio) {
+        options.setNonBlocking();
+    }
+
     for (;;) {
         MediaBuffer *mbuf;
         status_t err = track->mSource->read(&mbuf, &options);
@@ -240,11 +341,18 @@
                 outLength += sizeof(int32_t);
             }
 
-            sp<ABuffer> buffer = new ABuffer(outLength);
-
-            memcpy(buffer->data(),
-                   (const uint8_t *)mbuf->data() + mbuf->range_offset(),
-                   mbuf->range_length());
+            sp<ABuffer> buffer;
+            if (mIsWidevine && !audio) {
+                // data is already provided in the buffer
+                buffer = new ABuffer(NULL, mbuf->range_length());
+                buffer->meta()->setPointer("mediaBuffer", mbuf);
+                mbuf->add_ref();
+            } else {
+                buffer = new ABuffer(outLength);
+                memcpy(buffer->data(),
+                       (const uint8_t *)mbuf->data() + mbuf->range_offset(),
+                       mbuf->range_length());
+            }
 
             if (audio && mAudioIsVorbis) {
                 int32_t numPageSamples;
@@ -279,6 +387,8 @@
 
             track->mPackets->queueAccessUnit(buffer);
             break;
+        } else if (err == WOULD_BLOCK) {
+            break;
         } else if (err == INFO_FORMAT_CHANGED) {
 #if 0
             track->mPackets->queueDiscontinuity(
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 20d597e..8e0209d 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -35,7 +35,10 @@
             const sp<AMessage> &notify,
             const sp<IMediaHTTPService> &httpService,
             const char *url,
-            const KeyedVector<String8, String8> *headers);
+            const KeyedVector<String8, String8> *headers,
+            bool isWidevine = false,
+            bool uidValid = false,
+            uid_t uid = 0);
 
     GenericSource(
             const sp<AMessage> &notify,
@@ -50,15 +53,22 @@
     virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
 
     virtual status_t getDuration(int64_t *durationUs);
+    virtual size_t getTrackCount() const;
+    virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
     virtual status_t seekTo(int64_t seekTimeUs);
 
+    virtual status_t setBuffers(bool audio, Vector<MediaBuffer *> &buffers);
+
 protected:
     virtual ~GenericSource();
 
     virtual sp<MetaData> getFormatMeta(bool audio);
 
 private:
+    Vector<sp<MediaSource> > mSources;
+
     struct Track {
+        size_t mIndex;
         sp<MediaSource> mSource;
         sp<AnotherPacketSource> mPackets;
     };
@@ -68,6 +78,9 @@
 
     int64_t mDurationUs;
     bool mAudioIsVorbis;
+    bool mIsWidevine;
+    bool mUIDValid;
+    uid_t mUID;
 
     void initFromDataSource(const sp<DataSource> &dataSource);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 88c59bf..6ccd27a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -36,6 +36,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
@@ -221,6 +222,10 @@
                     || strstr(url, ".sdp?"))) {
         source = new RTSPSource(
                 notify, httpService, url, headers, mUIDValid, mUID, true);
+    } else if ((!strncasecmp(url, "widevine://", 11))) {
+        source = new GenericSource(notify, httpService, url, headers,
+                true /* isWidevine */, mUIDValid, mUID);
+        mSourceFlags |= Source::FLAG_SECURE;
     } else {
         source = new GenericSource(notify, httpService, url, headers);
     }
@@ -512,6 +517,17 @@
             mNumFramesDropped = 0;
             mStarted = true;
 
+            /* instantiate decoders now for secure playback */
+            if (mSourceFlags & Source::FLAG_SECURE) {
+                if (mNativeWindow != NULL) {
+                    instantiateDecoder(false, &mVideoDecoder);
+                }
+
+                if (mAudioSink != NULL) {
+                    instantiateDecoder(true, &mAudioDecoder);
+                }
+            }
+
             mSource->start();
 
             uint32_t flags = 0;
@@ -540,7 +556,10 @@
                     new AMessage(kWhatRendererNotify, id()),
                     flags);
 
-            looper()->registerHandler(mRenderer);
+            mRendererLooper = new ALooper;
+            mRendererLooper->setName("NuPlayerRenderer");
+            mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+            mRendererLooper->registerHandler(mRenderer);
 
             postScanSources();
             break;
@@ -1055,6 +1074,10 @@
 
         sp<AMessage> ccNotify = new AMessage(kWhatClosedCaptionNotify, id());
         mCCDecoder = new CCDecoder(ccNotify);
+
+        if (mSourceFlags & Source::FLAG_SECURE) {
+            format->setInt32("secure", true);
+        }
     }
 
     sp<AMessage> notify =
@@ -1073,6 +1096,28 @@
     (*decoder)->init();
     (*decoder)->configure(format);
 
+    // allocate buffers to decrypt widevine source buffers
+    if (!audio && (mSourceFlags & Source::FLAG_SECURE)) {
+        Vector<sp<ABuffer> > inputBufs;
+        CHECK_EQ((*decoder)->getInputBuffers(&inputBufs), (status_t)OK);
+
+        Vector<MediaBuffer *> mediaBufs;
+        for (size_t i = 0; i < inputBufs.size(); i++) {
+            const sp<ABuffer> &buffer = inputBufs[i];
+            MediaBuffer *mbuf = new MediaBuffer(buffer->data(), buffer->size());
+            mediaBufs.push(mbuf);
+        }
+
+        status_t err = mSource->setBuffers(audio, mediaBufs);
+        if (err != OK) {
+            for (size_t i = 0; i < mediaBufs.size(); ++i) {
+                mediaBufs[i]->release();
+            }
+            mediaBufs.clear();
+            ALOGE("Secure source didn't support secure mediaBufs.");
+            return err;
+        }
+    }
     return OK;
 }
 
@@ -1184,6 +1229,7 @@
 
         dropAccessUnit = false;
         if (!audio
+                && !(mSourceFlags & Source::FLAG_SECURE)
                 && mVideoLateByUs > 100000ll
                 && mVideoIsAVC
                 && !IsAVCReferenceFrame(accessUnit)) {
@@ -1497,6 +1543,13 @@
     ++mScanSourcesGeneration;
     mScanSourcesPending = false;
 
+    if (mRendererLooper != NULL) {
+        if (mRenderer != NULL) {
+            mRendererLooper->unregisterHandler(mRenderer->id());
+        }
+        mRendererLooper->stop();
+        mRendererLooper.clear();
+    }
     mRenderer.clear();
 
     if (mSource != NULL) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index d7c00aa..c04e277 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -125,6 +125,7 @@
     sp<Decoder> mAudioDecoder;
     sp<CCDecoder> mCCDecoder;
     sp<Renderer> mRenderer;
+    sp<ALooper> mRendererLooper;
 
     List<sp<Action> > mDeferredActions;
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index dd73cc4..1b9bafb 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -26,6 +26,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
@@ -54,6 +55,22 @@
 NuPlayer::Decoder::~Decoder() {
 }
 
+static
+status_t PostAndAwaitResponse(
+        const sp<AMessage> &msg, sp<AMessage> *response) {
+    status_t err = msg->postAndAwaitResponse(response);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (!(*response)->findInt32("err", &err)) {
+        err = OK;
+    }
+
+    return err;
+}
+
 void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) {
     CHECK(mCodec == NULL);
 
@@ -72,8 +89,20 @@
     ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), surface.get());
 
     mCodec = MediaCodec::CreateByType(mCodecLooper, mime.c_str(), false /* encoder */);
+    int32_t secure = 0;
+    if (format->findInt32("secure", &secure) && secure != 0) {
+        if (mCodec != NULL) {
+            mCodec->getName(&mComponentName);
+            mComponentName.append(".secure");
+            mCodec->release();
+            ALOGI("[%s] creating", mComponentName.c_str());
+            mCodec = MediaCodec::CreateByComponentName(
+                    mCodecLooper, mComponentName.c_str());
+        }
+    }
     if (mCodec == NULL) {
-        ALOGE("Failed to create %s decoder", mime.c_str());
+        ALOGE("Failed to create %s%s decoder",
+                (secure ? "secure " : ""), mime.c_str());
         handleError(UNKNOWN_ERROR);
         return;
     }
@@ -107,6 +136,7 @@
 
     // the following should work after start
     CHECK_EQ((status_t)OK, mCodec->getInputBuffers(&mInputBuffers));
+    releaseAndResetMediaBuffers();
     CHECK_EQ((status_t)OK, mCodec->getOutputBuffers(&mOutputBuffers));
     ALOGV("[%s] got %zu input and %zu output buffers",
             mComponentName.c_str(),
@@ -117,6 +147,18 @@
     mPaused = false;
 }
 
+void NuPlayer::Decoder::releaseAndResetMediaBuffers() {
+    for (size_t i = 0; i < mMediaBuffers.size(); i++) {
+        if (mMediaBuffers[i] != NULL) {
+            mMediaBuffers[i]->release();
+            mMediaBuffers.editItemAt(i) = NULL;
+        }
+    }
+    mMediaBuffers.resize(mInputBuffers.size());
+    mInputBufferIsDequeued.clear();
+    mInputBufferIsDequeued.resize(mInputBuffers.size());
+}
+
 void NuPlayer::Decoder::requestCodecNotification() {
     if (mCodec != NULL) {
         sp<AMessage> reply = new AMessage(kWhatCodecNotify, id());
@@ -141,6 +183,14 @@
     msg->post();
 }
 
+status_t NuPlayer::Decoder::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
+    sp<AMessage> msg = new AMessage(kWhatGetInputBuffers, id());
+    msg->setPointer("buffers", buffers);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
 void NuPlayer::Decoder::handleError(int32_t err)
 {
     sp<AMessage> notify = mNotify->dup();
@@ -163,6 +213,12 @@
 
     CHECK_LT(bufferIx, mInputBuffers.size());
 
+    if (mMediaBuffers[bufferIx] != NULL) {
+        mMediaBuffers[bufferIx]->release();
+        mMediaBuffers.editItemAt(bufferIx) = NULL;
+    }
+    mInputBufferIsDequeued.editItemAt(bufferIx) = true;
+
     sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, id());
     reply->setSize("buffer-ix", bufferIx);
     reply->setInt32("generation", mBufferGeneration);
@@ -183,6 +239,44 @@
 
     sp<ABuffer> buffer;
     bool hasBuffer = msg->findBuffer("buffer", &buffer);
+
+    // Handle a Widevine Classic source, which may fill a different input buffer than the one requested.
+    MediaBuffer *mediaBuffer = NULL;
+    if (hasBuffer && buffer->meta()->findPointer(
+            "mediaBuffer", (void **)&mediaBuffer)) {
+        if (mediaBuffer == NULL) {
+            // received no actual buffer
+            ALOGW("[%s] received null MediaBuffer %s",
+                    mComponentName.c_str(), msg->debugString().c_str());
+            buffer = NULL;
+        } else {
+            // The source likely filled a different buffer than the one we requested; adjust the buffer index to match.
+            size_t ix;
+            for (ix = 0; ix < mInputBuffers.size(); ix++) {
+                const sp<ABuffer> &buf = mInputBuffers[ix];
+                if (buf->data() == mediaBuffer->data()) {
+                    // all input buffers are dequeued on start, hence the check
+                    CHECK(mInputBufferIsDequeued[ix]);
+                    ALOGV("[%s] received MediaBuffer for #%zu instead of #%zu",
+                            mComponentName.c_str(), ix, bufferIx);
+
+                    // TRICKY: need buffer for the metadata, so instead, set
+                    // codecBuffer to the same (though incorrect) buffer to
+                    // avoid a memcpy into the codecBuffer
+                    codecBuffer = buffer;
+                    codecBuffer->setRange(
+                            mediaBuffer->range_offset(),
+                            mediaBuffer->range_length());
+                    bufferIx = ix;
+                    break;
+                }
+            }
+            CHECK(ix < mInputBuffers.size());
+        }
+    }
+
+    mInputBufferIsDequeued.editItemAt(bufferIx) = false;
+
     if (buffer == NULL /* includes !hasBuffer */) {
         int32_t streamErr = ERROR_END_OF_STREAM;
         CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
@@ -236,6 +330,11 @@
                     mComponentName.c_str(), err);
             handleError(err);
         }
+
+        if (mediaBuffer != NULL) {
+            CHECK(mMediaBuffers[bufferIx] == NULL);
+            mMediaBuffers.editItemAt(bufferIx) = mediaBuffer;
+        }
     }
 }
 
@@ -352,6 +451,8 @@
         return;
     }
 
+    releaseAndResetMediaBuffers();
+
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatFlushCompleted);
     notify->post();
@@ -379,6 +480,8 @@
         mComponentName = "decoder";
     }
 
+    releaseAndResetMediaBuffers();
+
     if (err != OK) {
         ALOGE("failed to release %s (err=%d)", mComponentName.c_str(), err);
         handleError(err);
@@ -403,6 +506,23 @@
             break;
         }
 
+        case kWhatGetInputBuffers:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            Vector<sp<ABuffer> > *dstBuffers;
+            CHECK(msg->findPointer("buffers", (void **)&dstBuffers));
+
+            dstBuffers->clear();
+            for (size_t i = 0; i < mInputBuffers.size(); i++) {
+                dstBuffers->push(mInputBuffers[i]);
+            }
+
+            (new AMessage)->postReply(replyID);
+            break;
+        }
+
         case kWhatCodecNotify:
         {
             if (!isStaleReply(msg)) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 4fa0dbd..c6fc237 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -26,6 +26,7 @@
 
 struct ABuffer;
 struct MediaCodec;
+struct MediaBuffer;
 
 struct NuPlayer::Decoder : public AHandler {
     Decoder(const sp<AMessage> &notify,
@@ -34,6 +35,7 @@
     virtual void configure(const sp<AMessage> &format);
     virtual void init();
 
+    status_t getInputBuffers(Vector<sp<ABuffer> > *dstBuffers) const;
     virtual void signalFlush();
     virtual void signalResume();
     virtual void initiateShutdown();
@@ -60,6 +62,7 @@
     enum {
         kWhatCodecNotify        = 'cdcN',
         kWhatConfigure          = 'conf',
+        kWhatGetInputBuffers    = 'gInB',
         kWhatInputBufferFilled  = 'inpF',
         kWhatRenderBuffer       = 'rndr',
         kWhatFlush              = 'flus',
@@ -77,11 +80,14 @@
 
     Vector<sp<ABuffer> > mInputBuffers;
     Vector<sp<ABuffer> > mOutputBuffers;
+    Vector<bool> mInputBufferIsDequeued;
+    Vector<MediaBuffer *> mMediaBuffers;
 
     void handleError(int32_t err);
     bool handleAnInputBuffer();
     bool handleAnOutputBuffer();
 
+    void releaseAndResetMediaBuffers();
     void requestCodecNotification();
     bool isStaleReply(const sp<AMessage> &msg);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index f520ff7..8592ec2 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -26,6 +26,8 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 
+#include <inttypes.h>
+
 namespace android {
 
 // static
@@ -502,6 +504,7 @@
         }
     }
 
+    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
     msg->post(delayUs);
 
     mDrainVideoQueuePending = true;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 632c4a6..259925f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -21,11 +21,14 @@
 #include "NuPlayer.h"
 
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/Vector.h>
 
 namespace android {
 
 struct ABuffer;
 struct MetaData;
+struct MediaBuffer;
 
 struct NuPlayer::Source : public AHandler {
     enum Flags {
@@ -34,6 +37,7 @@
         FLAG_CAN_SEEK_FORWARD   = 4,  // the "10 sec forward button"
         FLAG_CAN_SEEK           = 8,  // the "seek bar"
         FLAG_DYNAMIC_DURATION   = 16,
+        FLAG_SECURE             = 32,
     };
 
     enum {
@@ -89,6 +93,10 @@
         return INVALID_OPERATION;
     }
 
+    virtual status_t setBuffers(bool /* audio */, Vector<MediaBuffer *> &/* buffers */) {
+        return INVALID_OPERATION;
+    }
+
     virtual bool isRealTime() const {
         return false;
     }
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 9c64d72..6cb1c64 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -3989,6 +3989,8 @@
 
         if (err == OK) {
             break;
+        } else {
+            ALOGW("Allocating component '%s' failed, try next one.", componentName.c_str());
         }
 
         node = NULL;
@@ -4504,11 +4506,14 @@
 
     submitOutputBuffers();
 
-    // Post the first input buffer.
+    // Post every input buffer that we currently own to be filled.
     CHECK_GT(mCodec->mBuffers[kPortIndexInput].size(), 0u);
-    BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(0);
-
-    postFillThisBuffer(info);
+    for (size_t i = 0; i < mCodec->mBuffers[kPortIndexInput].size(); i++) {
+        BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(i);
+        if (info->mStatus == BufferInfo::OWNED_BY_US) {
+            postFillThisBuffer(info);
+        }
+    }
 
     mActive = true;
 }
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 207acc8..19da6ee 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -3665,7 +3665,7 @@
 
         uint32_t sampleIndex;
         status_t err = mSampleTable->findSampleAtTime(
-                seekTimeUs * mTimescale / 1000000,
+                seekTimeUs, 1000000, mTimescale,
                 &sampleIndex, findFlags);
 
         if (mode == ReadOptions::SEEK_CLOSEST) {
diff --git a/media/libstagefright/MediaBufferGroup.cpp b/media/libstagefright/MediaBufferGroup.cpp
index 80aae51..6ac6d4a 100644
--- a/media/libstagefright/MediaBufferGroup.cpp
+++ b/media/libstagefright/MediaBufferGroup.cpp
@@ -55,7 +55,8 @@
     mLastBuffer = buffer;
 }
 
-status_t MediaBufferGroup::acquire_buffer(MediaBuffer **out) {
+status_t MediaBufferGroup::acquire_buffer(
+        MediaBuffer **out, bool nonBlocking) {
     Mutex::Autolock autoLock(mLock);
 
     for (;;) {
@@ -70,6 +71,11 @@
             }
         }
 
+        if (nonBlocking) {
+            *out = NULL;
+            return WOULD_BLOCK;
+        }
+
         // All buffers are in use. Block until one of them is returned to us.
         mCondition.wait(mLock);
     }
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 24fd7ad..7a9cb0b 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -106,6 +106,11 @@
 }
 
 status_t MediaCodec::init(const char *name, bool nameIsType, bool encoder) {
+    // Save the init parameters so that reset() can re-run init() later.
+    mInitName = name;
+    mInitNameIsType = nameIsType;
+    mInitIsEncoder = encoder;
+
     // Current video decoders do not return from OMX_FillThisBuffer
     // quickly, violating the OpenMAX specs, until that is remedied
     // we need to invest in an extra looper to free the main event
@@ -235,6 +240,40 @@
     return PostAndAwaitResponse(msg, &response);
 }
 
+status_t MediaCodec::reset() {
+    /* An external-facing MediaCodec object is already initialized when
+       created, so reset() amounts to release() followed by init(),
+       plus clearing any remaining state. */
+
+    status_t err = release();
+
+    // unregister handlers
+    if (mCodec != NULL) {
+        if (mCodecLooper != NULL) {
+            mCodecLooper->unregisterHandler(mCodec->id());
+        } else {
+            mLooper->unregisterHandler(mCodec->id());
+        }
+        mCodec = NULL;
+    }
+    mLooper->unregisterHandler(id());
+
+    mFlags = 0;    // clear all flags
+
+    // Clear the state that setState(UNINITIALIZED) does not reset.
+    mReplyID = 0;
+    mDequeueInputReplyID = 0;
+    mDequeueOutputReplyID = 0;
+    mDequeueInputTimeoutGeneration = 0;
+    mDequeueOutputTimeoutGeneration = 0;
+    mHaveInputSurface = false;
+
+    if (err == OK) {
+        err = init(mInitName.c_str(), mInitNameIsType, mInitIsEncoder);
+    }
+    return err;
+}
+
 status_t MediaCodec::queueInputBuffer(
         size_t index,
         size_t offset,
@@ -1553,6 +1592,7 @@
         mCrypto.clear();
         setNativeWindow(NULL);
 
+        mInputFormat.clear();
         mOutputFormat.clear();
         mFlags &= ~kFlagOutputFormatChanged;
         mFlags &= ~kFlagOutputBuffersChanged;
@@ -1566,6 +1606,9 @@
     }
 
     if (newState == UNINITIALIZED) {
+        // return any straggling buffers, e.g. if we got here on an error
+        returnBuffersToCodec();
+
         mComponentName.clear();
 
         // The component is gone, mediaserver's probably back up already
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index cd51582..8f54343 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -21,6 +21,7 @@
 #include <media/stagefright/MediaCodecList.h>
 
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/OMXCodec.h>
@@ -79,6 +80,19 @@
                   info->mName.c_str());
 
             mCodecInfos.removeAt(i);
+#if LOG_NDEBUG == 0
+        } else {
+            for (size_t type_ix = 0; type_ix < mTypes.size(); ++type_ix) {
+                uint32_t typeMask = 1ul << mTypes.valueAt(type_ix);
+                if (info->mTypes & typeMask) {
+                    AString mime = mTypes.keyAt(type_ix);
+                    uint32_t bit = mTypes.valueAt(type_ix);
+
+                    ALOGV("%s codec info for %s: %s", info->mName.c_str(), mime.c_str(),
+                            info->mCaps.editValueFor(bit)->debugString().c_str());
+                }
+            }
+#endif
         }
     }
 
@@ -217,6 +231,8 @@
         return;
     }
 
+    bool inType = true;
+
     if (!strcmp(name, "Include")) {
         mInitCheck = includeXMLFile(attrs);
         if (mInitCheck == OK) {
@@ -267,6 +283,26 @@
                 mInitCheck = addQuirk(attrs);
             } else if (!strcmp(name, "Type")) {
                 mInitCheck = addTypeFromAttributes(attrs);
+                mCurrentSection =
+                    (mCurrentSection == SECTION_DECODER
+                            ? SECTION_DECODER_TYPE : SECTION_ENCODER_TYPE);
+            }
+        }
+        inType = false;
+        // fall through
+
+        case SECTION_DECODER_TYPE:
+        case SECTION_ENCODER_TYPE:
+        {
+            CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+            // ignore limits and features specified outside of type
+            bool outside = !inType && info->mSoleType == 0;
+            if (outside && (!strcmp(name, "Limit") || !strcmp(name, "Feature"))) {
+                ALOGW("ignoring %s specified outside of a Type", name);
+            } else if (!strcmp(name, "Limit")) {
+                mInitCheck = addLimit(attrs);
+            } else if (!strcmp(name, "Feature")) {
+                mInitCheck = addFeature(attrs);
             }
             break;
         }
@@ -300,10 +336,27 @@
             break;
         }
 
+        case SECTION_DECODER_TYPE:
+        case SECTION_ENCODER_TYPE:
+        {
+            if (!strcmp(name, "Type")) {
+                mCurrentSection =
+                    (mCurrentSection == SECTION_DECODER_TYPE
+                            ? SECTION_DECODER : SECTION_ENCODER);
+
+                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+                info->mCurrentCaps = NULL;
+            }
+            break;
+        }
+
         case SECTION_DECODER:
         {
             if (!strcmp(name, "MediaCodec")) {
                 mCurrentSection = SECTION_DECODERS;
+
+                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+                info->mCurrentCaps = NULL;
             }
             break;
         }
@@ -312,6 +365,9 @@
         {
             if (!strcmp(name, "MediaCodec")) {
                 mCurrentSection = SECTION_ENCODERS;
+
+                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+                info->mCurrentCaps = NULL;
             }
             break;
         }
@@ -373,11 +429,16 @@
     CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
     info->mName = name;
     info->mIsEncoder = encoder;
+    info->mSoleType = 0;
     info->mTypes = 0;
     info->mQuirks = 0;
+    info->mCurrentCaps = NULL;
 
     if (type != NULL) {
         addType(type);
+        // if type was specified in attributes, we do not allow
+        // subsequent types
+        info->mSoleType = info->mTypes;
     }
 }
 
@@ -427,6 +488,12 @@
 status_t MediaCodecList::addTypeFromAttributes(const char **attrs) {
     const char *name = NULL;
 
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+    if (info->mSoleType != 0) {
+        ALOGE("Codec '%s' already had its type specified", info->mName.c_str());
+        return -EINVAL;
+    }
+
     size_t i = 0;
     while (attrs[i] != NULL) {
         if (!strcmp(attrs[i], "name")) {
@@ -469,6 +536,11 @@
 
     CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
     info->mTypes |= 1ul << bit;
+    if (info->mCaps.indexOfKey(bit) < 0) {
+        AMessage *msg = new AMessage();
+        info->mCaps.add(bit, msg);
+    }
+    info->mCurrentCaps = info->mCaps.editValueFor(bit);
 }
 
 ssize_t MediaCodecList::findCodecByType(
@@ -494,6 +566,216 @@
     return -ENOENT;
 }
 
+static status_t limitFoundMissingAttr(AString name, const char *attr, bool found = true) {
+    ALOGE("limit '%s' with %s'%s' attribute", name.c_str(),
+            (found ? "" : "no "), attr);
+    return -EINVAL;
+}
+
+static status_t limitError(AString name, const char *msg) {
+    ALOGE("limit '%s' %s", name.c_str(), msg);
+    return -EINVAL;
+}
+
+static status_t limitInvalidAttr(AString name, const char *attr, AString value) {
+    ALOGE("limit '%s' with invalid '%s' attribute (%s)", name.c_str(),
+            attr, value.c_str());
+    return -EINVAL;
+}
+
+status_t MediaCodecList::addLimit(const char **attrs) {
+    sp<AMessage> msg = new AMessage();
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (attrs[i + 1] == NULL) {
+            return -EINVAL;
+        }
+
+        // attributes with values
+        if (!strcmp(attrs[i], "name")
+                || !strcmp(attrs[i], "default")
+                || !strcmp(attrs[i], "in")
+                || !strcmp(attrs[i], "max")
+                || !strcmp(attrs[i], "min")
+                || !strcmp(attrs[i], "range")
+                || !strcmp(attrs[i], "ranges")
+                || !strcmp(attrs[i], "scale")
+                || !strcmp(attrs[i], "value")) {
+            msg->setString(attrs[i], attrs[i + 1]);
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+        ++i;
+    }
+
+    AString name;
+    if (!msg->findString("name", &name)) {
+        ALOGE("limit with no 'name' attribute");
+        return -EINVAL;
+    }
+
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+
+    // size, blocks, bitrate, frame-rate, blocks-per-second, aspect-ratio: range
+    // quality: range + default + [scale]
+    // complexity: range + default
+    bool found;
+    if (name == "aspect-ratio" || name == "bitrate" || name == "block-count"
+            || name == "blocks-per-second" || name == "complexity"
+            || name == "frame-rate" || name == "quality" || name == "size") {
+        AString min, max;
+        if (msg->findString("min", &min) && msg->findString("max", &max)) {
+            min.append("-");
+            min.append(max);
+            if (msg->contains("range") || msg->contains("value")) {
+                return limitError(name, "has 'min' and 'max' as well as 'range' or "
+                        "'value' attributes");
+            }
+            msg->setString("range", min);
+        } else if (msg->contains("min") || msg->contains("max")) {
+            return limitError(name, "has only 'min' or 'max' attribute");
+        } else if (msg->findString("value", &max)) {
+            min = max;
+            min.append("-");
+            min.append(max);
+            if (msg->contains("range")) {
+                return limitError(name, "has both 'range' and 'value' attributes");
+            }
+            msg->setString("range", min);
+        }
+
+        AString range, scale = "linear", def, in_;
+        if (!msg->findString("range", &range)) {
+            return limitError(name, "with no 'range', 'value' or 'min'/'max' attributes");
+        }
+
+        if ((name == "quality" || name == "complexity") ^
+                (found = msg->findString("default", &def))) {
+            return limitFoundMissingAttr(name, "default", found);
+        }
+        if (name != "quality" && msg->findString("scale", &scale)) {
+            return limitFoundMissingAttr(name, "scale");
+        }
+        if ((name == "aspect-ratio") ^ (found = msg->findString("in", &in_))) {
+            return limitFoundMissingAttr(name, "in", found);
+        }
+
+        if (name == "aspect-ratio") {
+            if (!(in_ == "pixels") && !(in_ == "blocks")) {
+                return limitInvalidAttr(name, "in", in_);
+            }
+            in_.erase(5, 1); // (pixel|block)-aspect-ratio
+            in_.append("-");
+            in_.append(name);
+            name = in_;
+        }
+        if (name == "quality") {
+            info->mCurrentCaps->setString("quality-scale", scale);
+        }
+        if (name == "quality" || name == "complexity") {
+            AString tag = name;
+            tag.append("-default");
+            info->mCurrentCaps->setString(tag.c_str(), def);
+        }
+        AString tag = name;
+        tag.append("-range");
+        info->mCurrentCaps->setString(tag.c_str(), range);
+    } else {
+        AString max, value, ranges;
+        if (msg->contains("default")) {
+            return limitFoundMissingAttr(name, "default");
+        } else if (msg->contains("in")) {
+            return limitFoundMissingAttr(name, "in");
+        } else if ((name == "channel-count") ^
+                (found = msg->findString("max", &max))) {
+            return limitFoundMissingAttr(name, "max", found);
+        } else if (msg->contains("min")) {
+            return limitFoundMissingAttr(name, "min");
+        } else if (msg->contains("range")) {
+            return limitFoundMissingAttr(name, "range");
+        } else if ((name == "sample-rate") ^
+                (found = msg->findString("ranges", &ranges))) {
+            return limitFoundMissingAttr(name, "ranges", found);
+        } else if (msg->contains("scale")) {
+            return limitFoundMissingAttr(name, "scale");
+        } else if ((name == "alignment" || name == "block-size") ^
+                (found = msg->findString("value", &value))) {
+            return limitFoundMissingAttr(name, "value", found);
+        }
+
+        if (max.size()) {
+            AString tag = "max-";
+            tag.append(name);
+            info->mCurrentCaps->setString(tag.c_str(), max);
+        } else if (value.size()) {
+            info->mCurrentCaps->setString(name.c_str(), value);
+        } else if (ranges.size()) {
+            AString tag = name;
+            tag.append("-ranges");
+            info->mCurrentCaps->setString(tag.c_str(), ranges);
+        } else {
+            ALOGW("Ignoring unrecognized limit '%s'", name.c_str());
+        }
+    }
+    return OK;
+}
+
+static bool parseBoolean(const char *s) {
+    if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
+        return true;
+    }
+    char *end;
+    unsigned long res = strtoul(s, &end, 10);
+    return *s != '\0' && *end == '\0' && res > 0;
+}
+
+status_t MediaCodecList::addFeature(const char **attrs) {
+    size_t i = 0;
+    const char *name = NULL;
+    int32_t optional = -1;
+    int32_t required = -1;
+
+    while (attrs[i] != NULL) {
+        if (attrs[i + 1] == NULL) {
+            return -EINVAL;
+        }
+
+        // attributes with values
+        if (!strcmp(attrs[i], "name")) {
+            name = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "optional") || !strcmp(attrs[i], "required")) {
+            int value = (int)parseBoolean(attrs[i + 1]);
+            if (!strcmp(attrs[i], "optional")) {
+                optional = value;
+            } else {
+                required = value;
+            }
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+        ++i;
+    }
+    if (name == NULL) {
+        ALOGE("feature with no 'name' attribute");
+        return -EINVAL;
+    }
+
+    if (optional == required && optional != -1) {
+        ALOGE("feature '%s' is both/neither optional and required", name);
+        return -EINVAL;
+    }
+
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+    AString tag = "feature-";
+    tag.append(name);
+    info->mCurrentCaps->setInt32(tag.c_str(), (required == 1) || (optional == 0));
+    return OK;
+}
+
 ssize_t MediaCodecList::findCodecByName(const char *name) const {
     for (size_t i = 0; i < mCodecInfos.size(); ++i) {
         const CodecInfo &info = mCodecInfos.itemAt(i);
@@ -571,7 +853,8 @@
         size_t index, const char *type,
         Vector<ProfileLevel> *profileLevels,
         Vector<uint32_t> *colorFormats,
-        uint32_t *flags) const {
+        uint32_t *flags,
+        sp<AMessage> *capabilities) const {
     profileLevels->clear();
     colorFormats->clear();
 
@@ -581,6 +864,13 @@
 
     const CodecInfo &info = mCodecInfos.itemAt(index);
 
+    ssize_t typeIndex = mTypes.indexOfKey(type);
+    if (typeIndex < 0) {
+        return -EINVAL;
+    }
+    // essentially doing valueFor without the CHECK abort
+    typeIndex = mTypes.valueAt(typeIndex);
+
     OMXClient client;
     status_t err = client.connect();
     if (err != OK) {
@@ -611,6 +901,11 @@
 
     *flags = caps.mFlags;
 
+    // TODO this check will be removed once JNI side is merged
+    if (capabilities != NULL) {
+        *capabilities = info.mCaps.valueFor(typeIndex);
+    }
+
     return OK;
 }
 
diff --git a/media/libstagefright/MediaSource.cpp b/media/libstagefright/MediaSource.cpp
index fd0e79c..576471a 100644
--- a/media/libstagefright/MediaSource.cpp
+++ b/media/libstagefright/MediaSource.cpp
@@ -32,6 +32,19 @@
     mOptions = 0;
     mSeekTimeUs = 0;
     mLatenessUs = 0;
+    mNonBlocking = false;
+}
+
+void MediaSource::ReadOptions::setNonBlocking() {
+    mNonBlocking = true;
+}
+
+void MediaSource::ReadOptions::clearNonBlocking() {
+    mNonBlocking = false;
+}
+
+bool MediaSource::ReadOptions::getNonBlocking() const {
+    return mNonBlocking;
 }
 
 void MediaSource::ReadOptions::setSeekTo(int64_t time_us, SeekMode mode) {
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index 8c15929..821bd81 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -320,22 +320,26 @@
     }
 
     size_t left = 0;
-    size_t right = mTableOfContents.size();
-    while (left < right) {
-        size_t center = left / 2 + right / 2 + (left & right & 1);
+    size_t right_plus_one = mTableOfContents.size();
+    while (left < right_plus_one) {
+        size_t center = left + (right_plus_one - left) / 2;
 
         const TOCEntry &entry = mTableOfContents.itemAt(center);
 
         if (timeUs < entry.mTimeUs) {
-            right = center;
+            right_plus_one = center;
         } else if (timeUs > entry.mTimeUs) {
             left = center + 1;
         } else {
-            left = right = center;
+            left = center;
             break;
         }
     }
 
+    if (left == mTableOfContents.size()) {
+        --left;
+    }
+
     const TOCEntry &entry = mTableOfContents.itemAt(left);
 
     ALOGV("seeking to entry %zu / %zu at offset %lld",
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 9a92805..bad43f2 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -520,83 +520,72 @@
 }
 
 status_t SampleTable::findSampleAtTime(
-        uint32_t req_time, uint32_t *sample_index, uint32_t flags) {
+        uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
+        uint32_t *sample_index, uint32_t flags) {
     buildSampleEntriesTable();
 
     uint32_t left = 0;
-    uint32_t right = mNumSampleSizes;
-    while (left < right) {
-        uint32_t center = (left + right) / 2;
-        uint32_t centerTime = mSampleTimeEntries[center].mCompositionTime;
+    uint32_t right_plus_one = mNumSampleSizes;
+    while (left < right_plus_one) {
+        uint32_t center = left + (right_plus_one - left) / 2;
+        uint64_t centerTime =
+            getSampleTime(center, scale_num, scale_den);
 
         if (req_time < centerTime) {
-            right = center;
+            right_plus_one = center;
         } else if (req_time > centerTime) {
             left = center + 1;
         } else {
-            left = center;
-            break;
+            *sample_index = mSampleTimeEntries[center].mSampleIndex;
+            return OK;
         }
     }
 
-    if (left == mNumSampleSizes) {
-        if (flags == kFlagAfter) {
-            return ERROR_OUT_OF_RANGE;
-        }
-
-        --left;
-    }
-
     uint32_t closestIndex = left;
 
+    if (closestIndex == mNumSampleSizes) {
+        if (flags == kFlagAfter) {
+            return ERROR_OUT_OF_RANGE;
+        }
+        flags = kFlagBefore;
+    } else if (closestIndex == 0) {
+        if (flags == kFlagBefore) {
+            // normally we should return out of range, but that is
+            // treated as end-of-stream.  instead return first sample
+            //
+            // return ERROR_OUT_OF_RANGE;
+        }
+        flags = kFlagAfter;
+    }
+
     switch (flags) {
         case kFlagBefore:
         {
-            while (closestIndex > 0
-                    && mSampleTimeEntries[closestIndex].mCompositionTime
-                            > req_time) {
-                --closestIndex;
-            }
+            --closestIndex;
             break;
         }
 
         case kFlagAfter:
         {
-            while (closestIndex + 1 < mNumSampleSizes
-                    && mSampleTimeEntries[closestIndex].mCompositionTime
-                            < req_time) {
-                ++closestIndex;
-            }
+            // nothing to do
             break;
         }
 
         default:
         {
             CHECK(flags == kFlagClosest);
-
-            if (closestIndex > 0) {
-                // Check left neighbour and pick closest.
-                uint32_t absdiff1 =
-                    abs_difference(
-                            mSampleTimeEntries[closestIndex].mCompositionTime,
-                            req_time);
-
-                uint32_t absdiff2 =
-                    abs_difference(
-                            mSampleTimeEntries[closestIndex - 1].mCompositionTime,
-                            req_time);
-
-                if (absdiff1 > absdiff2) {
-                    closestIndex = closestIndex - 1;
-                }
+            // pick closest based on timestamp. use abs_difference for safety
+            if (abs_difference(
+                    getSampleTime(closestIndex, scale_num, scale_den), req_time) >
+                abs_difference(
+                    req_time, getSampleTime(closestIndex - 1, scale_num, scale_den))) {
+                --closestIndex;
             }
-
             break;
         }
     }
 
     *sample_index = mSampleTimeEntries[closestIndex].mSampleIndex;
-
     return OK;
 }
 
@@ -618,109 +607,85 @@
     }
 
     uint32_t left = 0;
-    uint32_t right = mNumSyncSamples;
-    while (left < right) {
-        uint32_t center = left + (right - left) / 2;
+    uint32_t right_plus_one = mNumSyncSamples;
+    while (left < right_plus_one) {
+        uint32_t center = left + (right_plus_one - left) / 2;
         uint32_t x = mSyncSamples[center];
 
         if (start_sample_index < x) {
-            right = center;
+            right_plus_one = center;
         } else if (start_sample_index > x) {
             left = center + 1;
         } else {
-            left = center;
-            break;
+            *sample_index = x;
+            return OK;
         }
     }
+
     if (left == mNumSyncSamples) {
         if (flags == kFlagAfter) {
             ALOGE("tried to find a sync frame after the last one: %d", left);
             return ERROR_OUT_OF_RANGE;
         }
-        left = left - 1;
+        flags = kFlagBefore;
+    }
+    else if (left == 0) {
+        if (flags == kFlagBefore) {
+            ALOGE("tried to find a sync frame before the first one: %d", left);
+
+            // normally we should return out of range, but that is
+            // treated as end-of-stream.  instead seek to first sync
+            //
+            // return ERROR_OUT_OF_RANGE;
+        }
+        flags = kFlagAfter;
     }
 
-    // Now ssi[left] is the sync sample index just before (or at)
-    // start_sample_index.
-    // Also start_sample_index < ssi[left + 1], if left + 1 < mNumSyncSamples.
-
-    uint32_t x = mSyncSamples[left];
-
-    if (left + 1 < mNumSyncSamples) {
-        uint32_t y = mSyncSamples[left + 1];
-
-        // our sample lies between sync samples x and y.
-
-        status_t err = mSampleIterator->seekTo(start_sample_index);
-        if (err != OK) {
-            return err;
-        }
-
-        uint32_t sample_time = mSampleIterator->getSampleTime();
-
-        err = mSampleIterator->seekTo(x);
-        if (err != OK) {
-            return err;
-        }
-        uint32_t x_time = mSampleIterator->getSampleTime();
-
-        err = mSampleIterator->seekTo(y);
-        if (err != OK) {
-            return err;
-        }
-
-        uint32_t y_time = mSampleIterator->getSampleTime();
-
-        if (abs_difference(x_time, sample_time)
-                > abs_difference(y_time, sample_time)) {
-            // Pick the sync sample closest (timewise) to the start-sample.
-            x = y;
-            ++left;
-        }
-    }
-
+    // Now ssi[left - 1] <(=) start_sample_index <= ssi[left]
     switch (flags) {
         case kFlagBefore:
         {
-            if (x > start_sample_index) {
-                CHECK(left > 0);
-
-                x = mSyncSamples[left - 1];
-
-                if (x > start_sample_index) {
-                    // The table of sync sample indices was not sorted
-                    // properly.
-                    return ERROR_MALFORMED;
-                }
-            }
+            --left;
             break;
         }
-
         case kFlagAfter:
         {
-            if (x < start_sample_index) {
-                if (left + 1 >= mNumSyncSamples) {
-                    return ERROR_OUT_OF_RANGE;
-                }
-
-                x = mSyncSamples[left + 1];
-
-                if (x < start_sample_index) {
-                    // The table of sync sample indices was not sorted
-                    // properly.
-                    return ERROR_MALFORMED;
-                }
-            }
-
+            // nothing to do
             break;
         }
-
         default:
+        {
+            // this route is not used, but implement it nonetheless
+            CHECK(flags == kFlagClosest);
+
+            status_t err = mSampleIterator->seekTo(start_sample_index);
+            if (err != OK) {
+                return err;
+            }
+            uint32_t sample_time = mSampleIterator->getSampleTime();
+
+            err = mSampleIterator->seekTo(mSyncSamples[left]);
+            if (err != OK) {
+                return err;
+            }
+            uint32_t upper_time = mSampleIterator->getSampleTime();
+
+            err = mSampleIterator->seekTo(mSyncSamples[left - 1]);
+            if (err != OK) {
+                return err;
+            }
+            uint32_t lower_time = mSampleIterator->getSampleTime();
+
+            // use abs_difference for safety
+            if (abs_difference(upper_time, sample_time) >
+                abs_difference(sample_time, lower_time)) {
+                --left;
+            }
             break;
+        }
     }
 
-    *sample_index = x;
-
+    *sample_index = mSyncSamples[left];
     return OK;
 }
 
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index dc42f91..d268aa4 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -127,6 +127,20 @@
     return NULL;
 }
 
+bool AMessage::contains(const char *name) const {
+    name = AAtomizer::Atomize(name);
+
+    for (size_t i = 0; i < mNumItems; ++i) {
+        const Item *item = &mItems[i];
+
+        if (item->mName == name) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
 #define BASIC_TYPE(NAME,FIELDNAME,TYPENAME)                             \
 void AMessage::set##NAME(const char *name, TYPENAME value) {            \
     Item *item = allocateItem(name);                                    \
@@ -160,6 +174,11 @@
     item->u.stringValue = new AString(s, len < 0 ? strlen(s) : len);
 }
 
+void AMessage::setString(
+        const char *name, const AString &s) {
+    setString(name, s.c_str(), s.size());
+}
+
 void AMessage::setObjectInternal(
         const char *name, const sp<RefBase> &obj, Type type) {
     Item *item = allocateItem(name);
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 281e0da..efd852c 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -413,6 +413,8 @@
         // Base URL must be absolute
         return false;
     }
+    const size_t schemeEnd = (strstr(baseURL, "//") - baseURL) + 2;
+    CHECK(schemeEnd == 7 || schemeEnd == 8);
 
     if (!strncasecmp("http://", url, 7) || !strncasecmp("https://", url, 8)) {
         // "url" is already an absolute URL, ignore base URL.
@@ -457,7 +459,7 @@
 
         // Check whether the found slash actually is part of the path
         // and not part of the "http://".
-        if (end > 6) {
+        if (end >= schemeEnd) {
             out->setTo(baseURL, end);
         } else {
             out->setTo(baseURL);
@@ -728,6 +730,9 @@
 
             key.tolower();
             const AString &codecs = unquoteString(val);
+            if (meta->get() == NULL) {
+                *meta = new AMessage;
+            }
             (*meta)->setString(key.c_str(), codecs.c_str());
         } else if (!strcasecmp("audio", key.c_str())
                 || !strcasecmp("video", key.c_str())
@@ -751,6 +756,9 @@
             }
 
             key.tolower();
+            if (meta->get() == NULL) {
+                *meta = new AMessage;
+            }
             (*meta)->setString(key.c_str(), groupID.c_str());
         }
     }
diff --git a/media/libstagefright/include/SampleTable.h b/media/libstagefright/include/SampleTable.h
index fe146f2..d06df7b 100644
--- a/media/libstagefright/include/SampleTable.h
+++ b/media/libstagefright/include/SampleTable.h
@@ -75,7 +75,8 @@
         kFlagClosest
     };
     status_t findSampleAtTime(
-            uint32_t req_time, uint32_t *sample_index, uint32_t flags);
+            uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
+            uint32_t *sample_index, uint32_t flags);
 
     status_t findSyncSampleNear(
             uint32_t start_sample_index, uint32_t *sample_index,
@@ -138,6 +139,13 @@
 
     friend struct SampleIterator;
 
+    // normally we don't round
+    inline uint64_t getSampleTime(
+            size_t sample_index, uint64_t scale_num, uint64_t scale_den) const {
+        return (mSampleTimeEntries[sample_index].mCompositionTime
+            * scale_num) / scale_den;
+    }
+
     status_t getSampleSize_l(uint32_t sample_index, size_t *sample_size);
     uint32_t getCompositionTimeOffset(uint32_t sampleIndex);
 
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 871824a..a0319ab 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -230,6 +230,11 @@
             int32_t oldDiscontinuityType;
             if (!oldBuffer->meta()->findInt32(
                         "discontinuity", &oldDiscontinuityType)) {
+                MediaBuffer *mbuf = NULL;
+                oldBuffer->meta()->findPointer("mediaBuffer", (void**)&mbuf);
+                if (mbuf != NULL) {
+                    mbuf->release();
+                }
                 it = mBuffers.erase(it);
                 continue;
             }
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 67e6d7b..fad6c33 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -75,8 +75,7 @@
     BufferQueue::createBufferQueue(&mProducer, &mConsumer);
     mConsumer->setConsumerName(name);
     mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
-    mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
-            GRALLOC_USAGE_HW_TEXTURE);
+    mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER);
 
     mInitCheck = mConsumer->setMaxAcquiredBufferCount(bufferCount);
     if (mInitCheck != NO_ERROR) {
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 22b12d9..cc4770a 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -233,7 +233,7 @@
             instance, &handle);
 
     if (err != OMX_ErrorNone) {
-        ALOGV("FAILED to allocate omx component '%s'", name);
+        ALOGE("FAILED to allocate omx component '%s'", name);
 
         instance->onGetHandleFailed();
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 5fed0c1..1ad6285 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1820,7 +1820,8 @@
                                           audio_devices_t *pDevices,
                                           uint32_t *pSamplingRate,
                                           audio_format_t *pFormat,
-                                          audio_channel_mask_t *pChannelMask)
+                                          audio_channel_mask_t *pChannelMask,
+                                          audio_input_flags_t flags)
 {
     struct audio_config config;
     memset(&config, 0, sizeof(config));
@@ -1848,13 +1849,14 @@
 
     audio_stream_in_t *inStream = NULL;
     status_t status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
-                                        &inStream);
+                                        &inStream, flags);
     ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %#x, Channels %x, "
-            "status %d",
+            "flags %#x, status %d",
             inStream,
             config.sample_rate,
             config.format,
             config.channel_mask,
+            flags,
             status);
 
     // If the input could not be opened with the requested parameters and we can handle the
@@ -1868,7 +1870,7 @@
         // FIXME describe the change proposed by HAL (save old values so we can log them here)
         ALOGV("openInput() reopening with proposed sampling rate and channel mask");
         inStream = NULL;
-        status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream);
+        status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream, flags);
         // FIXME log this new status; HAL should not propose any further changes
     }
 
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index be19554..bae18fd 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -179,7 +179,8 @@
                                         audio_devices_t *pDevices,
                                         uint32_t *pSamplingRate,
                                         audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask);
+                                        audio_channel_mask_t *pChannelMask,
+                                        audio_input_flags_t flags);
 
     virtual status_t closeInput(audio_io_handle_t input);
 
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index e57cb8a..529f2af 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -62,6 +62,10 @@
 #define ALOGVV(a...) do { } while (0)
 #endif
 
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
 // Set kUseNewMixer to true to use the new mixer engine. Otherwise the
 // original code will be used.  This is false for now.
 static const bool kUseNewMixer = false;
@@ -71,52 +75,12 @@
 // because of downmix/upmix support.
 static const bool kUseFloat = true;
 
+// Set to default copy buffer size in frames for input processing.
+static const size_t kCopyBufferFrameCount = 256;
+
 namespace android {
 
 // ----------------------------------------------------------------------------
-AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider() : AudioBufferProvider(),
-        mTrackBufferProvider(NULL), mDownmixHandle(NULL)
-{
-}
-
-AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider()
-{
-    ALOGV("AudioMixer deleting DownmixerBufferProvider (%p)", this);
-    EffectRelease(mDownmixHandle);
-}
-
-status_t AudioMixer::DownmixerBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
-        int64_t pts) {
-    //ALOGV("DownmixerBufferProvider::getNextBuffer()");
-    if (mTrackBufferProvider != NULL) {
-        status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
-        if (res == OK) {
-            mDownmixConfig.inputCfg.buffer.frameCount = pBuffer->frameCount;
-            mDownmixConfig.inputCfg.buffer.raw = pBuffer->raw;
-            mDownmixConfig.outputCfg.buffer.frameCount = pBuffer->frameCount;
-            mDownmixConfig.outputCfg.buffer.raw = mDownmixConfig.inputCfg.buffer.raw;
-            // in-place so overwrite the buffer contents, has been set in prepareTrackForDownmix()
-            //mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
-
-            res = (*mDownmixHandle)->process(mDownmixHandle,
-                    &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
-            //ALOGV("getNextBuffer is downmixing");
-        }
-        return res;
-    } else {
-        ALOGE("DownmixerBufferProvider::getNextBuffer() error: NULL track buffer provider");
-        return NO_INIT;
-    }
-}
-
-void AudioMixer::DownmixerBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
-    //ALOGV("DownmixerBufferProvider::releaseBuffer()");
-    if (mTrackBufferProvider != NULL) {
-        mTrackBufferProvider->releaseBuffer(pBuffer);
-    } else {
-        ALOGE("DownmixerBufferProvider::releaseBuffer() error: NULL track buffer provider");
-    }
-}
 
 template <typename T>
 T min(const T& a, const T& b)
@@ -124,102 +88,289 @@
     return a < b ? a : b;
 }
 
-AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels,
-        audio_format_t inputFormat, audio_format_t outputFormat) :
-        mTrackBufferProvider(NULL),
-        mChannels(channels),
-        mInputFormat(inputFormat),
-        mOutputFormat(outputFormat),
-        mInputFrameSize(channels * audio_bytes_per_sample(inputFormat)),
-        mOutputFrameSize(channels * audio_bytes_per_sample(outputFormat)),
-        mOutputData(NULL),
-        mOutputCount(0),
+AudioMixer::CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
+        size_t outputFrameSize, size_t bufferFrameCount) :
+        mInputFrameSize(inputFrameSize),
+        mOutputFrameSize(outputFrameSize),
+        mLocalBufferFrameCount(bufferFrameCount),
+        mLocalBufferData(NULL),
         mConsumed(0)
 {
-    ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat);
-    if (requiresInternalBuffers()) {
-        mOutputCount = 256;
-        (void)posix_memalign(&mOutputData, 32, mOutputCount * mOutputFrameSize);
+    ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
+            inputFrameSize, outputFrameSize, bufferFrameCount);
+    LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
+            "Requires local buffer if inputFrameSize(%d) < outputFrameSize(%d)",
+            inputFrameSize, outputFrameSize);
+    if (mLocalBufferFrameCount) {
+        (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
     }
     mBuffer.frameCount = 0;
 }
 
-AudioMixer::ReformatBufferProvider::~ReformatBufferProvider()
+AudioMixer::CopyBufferProvider::~CopyBufferProvider()
 {
-    ALOGV("~ReformatBufferProvider(%p)", this);
+    ALOGV("~CopyBufferProvider(%p)", this);
     if (mBuffer.frameCount != 0) {
         mTrackBufferProvider->releaseBuffer(&mBuffer);
     }
-    free(mOutputData);
+    free(mLocalBufferData);
 }
 
-status_t AudioMixer::ReformatBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
-        int64_t pts) {
-    //ALOGV("ReformatBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
+status_t AudioMixer::CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
+        int64_t pts)
+{
+    //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
     //        this, pBuffer, pBuffer->frameCount, pts);
-    if (!requiresInternalBuffers()) {
+    if (mLocalBufferFrameCount == 0) {
         status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
         if (res == OK) {
-            memcpy_by_audio_format(pBuffer->raw, mOutputFormat, pBuffer->raw, mInputFormat,
-                    pBuffer->frameCount * mChannels);
+            copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
         }
         return res;
     }
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = pBuffer->frameCount;
         status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
-        // TODO: Track down a bug in the upstream provider
-        // LOG_ALWAYS_FATAL_IF(res == OK && mBuffer.frameCount == 0,
-        //        "ReformatBufferProvider::getNextBuffer():"
-        //        " Invalid zero framecount returned from getNextBuffer()");
-        if (res != OK || mBuffer.frameCount == 0) {
+        // At one time an upstream buffer provider had
+        // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014.
+        //
+        // By API spec, if res != OK, then mBuffer.frameCount == 0.
+        // but there may be improper implementations.
+        ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
+        if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
             pBuffer->raw = NULL;
             pBuffer->frameCount = 0;
             return res;
         }
+        mConsumed = 0;
     }
     ALOG_ASSERT(mConsumed < mBuffer.frameCount);
-    size_t count = min(mOutputCount, mBuffer.frameCount - mConsumed);
+    size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
     count = min(count, pBuffer->frameCount);
-    pBuffer->raw = mOutputData;
+    pBuffer->raw = mLocalBufferData;
     pBuffer->frameCount = count;
-    //ALOGV("reformatting %d frames from %#x to %#x, %d chan",
-    //        pBuffer->frameCount, mInputFormat, mOutputFormat, mChannels);
-    memcpy_by_audio_format(pBuffer->raw, mOutputFormat,
-            (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize, mInputFormat,
-            pBuffer->frameCount * mChannels);
+    copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
+            pBuffer->frameCount);
     return OK;
 }
 
-void AudioMixer::ReformatBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
-    //ALOGV("ReformatBufferProvider(%p)::releaseBuffer(%p(%zu))",
+void AudioMixer::CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+    //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))",
     //        this, pBuffer, pBuffer->frameCount);
-    if (!requiresInternalBuffers()) {
+    if (mLocalBufferFrameCount == 0) {
         mTrackBufferProvider->releaseBuffer(pBuffer);
         return;
     }
     // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
     mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content
     if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) {
-        mConsumed = 0;
         mTrackBufferProvider->releaseBuffer(&mBuffer);
-        // ALOG_ASSERT(mBuffer.frameCount == 0);
+        ALOG_ASSERT(mBuffer.frameCount == 0);
     }
     pBuffer->raw = NULL;
     pBuffer->frameCount = 0;
 }
 
-void AudioMixer::ReformatBufferProvider::reset() {
+void AudioMixer::CopyBufferProvider::reset()
+{
     if (mBuffer.frameCount != 0) {
         mTrackBufferProvider->releaseBuffer(&mBuffer);
     }
     mConsumed = 0;
 }
 
-// ----------------------------------------------------------------------------
-bool AudioMixer::sIsMultichannelCapable = false;
+AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider(
+        audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
+        CopyBufferProvider(
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
+            bufferFrameCount)  // set bufferFrameCount to 0 to do in-place
+{
+    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
+            this, inputChannelMask, outputChannelMask, format,
+            sampleRate, sessionId);
+    if (!sIsMultichannelCapable
+            || EffectCreate(&sDwnmFxDesc.uuid,
+                    sessionId,
+                    SESSION_ID_INVALID_AND_IGNORED,
+                    &mDownmixHandle) != 0) {
+         ALOGE("DownmixerBufferProvider() error creating downmixer effect");
+         mDownmixHandle = NULL;
+         return;
+     }
+     // channel input configuration will be overridden per-track
+     mDownmixConfig.inputCfg.channels = inputChannelMask;   // FIXME: Should be bits
+     mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
+     mDownmixConfig.inputCfg.format = format;
+     mDownmixConfig.outputCfg.format = format;
+     mDownmixConfig.inputCfg.samplingRate = sampleRate;
+     mDownmixConfig.outputCfg.samplingRate = sampleRate;
+     mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+     mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
+     // input and output buffer provider, and frame count will not be used as the downmix effect
+     // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
+     mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
+             EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
+     mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
 
-effect_descriptor_t AudioMixer::sDwnmFxDesc;
+     int cmdStatus;
+     uint32_t replySize = sizeof(int);
+
+     // Configure downmixer
+     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
+             &mDownmixConfig /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Enable downmixer
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Set downmix type
+     // parameter size rounded for padding on 32bit boundary
+     const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
+     const int downmixParamSize =
+             sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
+     effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
+     param->psize = sizeof(downmix_params_t);
+     const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
+     memcpy(param->data, &downmixParam, param->psize);
+     const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
+     param->vsize = sizeof(downmix_type_t);
+     memcpy(param->data + psizePadded, &downmixType, param->vsize);
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
+             param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
+     free(param);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+     ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
+}
+
+AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider()
+{
+    ALOGV("~DownmixerBufferProvider (%p)", this);
+    EffectRelease(mDownmixHandle);
+    mDownmixHandle = NULL;
+}
+
+void AudioMixer::DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    mDownmixConfig.inputCfg.buffer.frameCount = frames;
+    mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src);
+    mDownmixConfig.outputCfg.buffer.frameCount = frames;
+    mDownmixConfig.outputCfg.buffer.raw = dst;
+    // may be in-place if src == dst.
+    status_t res = (*mDownmixHandle)->process(mDownmixHandle,
+            &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
+    ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
+}
+
+/* call once in a pthread_once handler. */
+/*static*/ status_t AudioMixer::DownmixerBufferProvider::init()
+{
+    // find multichannel downmix effect if we have to play multichannel content
+    uint32_t numEffects = 0;
+    int ret = EffectQueryNumberEffects(&numEffects);
+    if (ret != 0) {
+        ALOGE("AudioMixer() error %d querying number of effects", ret);
+        return NO_INIT;
+    }
+    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
+
+    for (uint32_t i = 0 ; i < numEffects ; i++) {
+        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
+            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
+            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+                ALOGI("found effect \"%s\" from %s",
+                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
+                sIsMultichannelCapable = true;
+                break;
+            }
+        }
+    }
+    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
+    return NO_INIT;
+}
+
+/*static*/ bool AudioMixer::DownmixerBufferProvider::sIsMultichannelCapable = false;
+/*static*/ effect_descriptor_t AudioMixer::DownmixerBufferProvider::sDwnmFxDesc;
+
+AudioMixer::RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(inputChannelMask),
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(outputChannelMask),
+                bufferFrameCount),
+        mFormat(format),
+        mSampleSize(audio_bytes_per_sample(format)),
+        mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
+        mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
+{
+    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %d %d",
+            this, format, inputChannelMask, outputChannelMask,
+            mInputChannels, mOutputChannels);
+    // TODO: consider channel representation in index array formulation
+    // We ignore channel representation, and just use the bits.
+    memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry),
+            audio_channel_mask_get_bits(outputChannelMask),
+            audio_channel_mask_get_bits(inputChannelMask));
+}
+
+void AudioMixer::RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_index_array(dst, mOutputChannels,
+            src, mInputChannels, mIdxAry, mSampleSize, frames);
+}
+
+AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels,
+        audio_format_t inputFormat, audio_format_t outputFormat,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+            channels * audio_bytes_per_sample(inputFormat),
+            channels * audio_bytes_per_sample(outputFormat),
+            bufferFrameCount),
+        mChannels(channels),
+        mInputFormat(inputFormat),
+        mOutputFormat(outputFormat)
+{
+    ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat);
+}
+
+void AudioMixer::ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannels);
+}
+
+// ----------------------------------------------------------------------------
 
 // Ensure mConfiguredNames bitmask is initialized properly on all architectures.
 // The value of 1 << x is undefined in C when x >= 32.
@@ -258,6 +409,7 @@
     for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
         t->resampler = NULL;
         t->downmixerBufferProvider = NULL;
+        t->mReformatBufferProvider = NULL;
         t++;
     }
 
@@ -269,6 +421,7 @@
     for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
         delete t->resampler;
         delete t->downmixerBufferProvider;
+        delete t->mReformatBufferProvider;
         t++;
     }
     delete [] mState.outputTemp;
@@ -409,95 +562,20 @@
 
     // discard the previous downmixer if there was one
     unprepareTrackForDownmix(pTrack, trackName);
+    if (DownmixerBufferProvider::isMultichannelCapable()) {
+        DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(pTrack->channelMask,
+                /* pTrack->mMixerChannelMask */ audio_channel_out_mask_from_count(2),
+                /* pTrack->mMixerInFormat */ AUDIO_FORMAT_PCM_16_BIT,
+                pTrack->sampleRate, pTrack->sessionId, kCopyBufferFrameCount);
 
-    DownmixerBufferProvider* pDbp = new DownmixerBufferProvider();
-    int32_t status;
-
-    if (!sIsMultichannelCapable) {
-        ALOGE("prepareTrackForDownmix(%d) fails: mixer doesn't support multichannel content",
-                trackName);
-        goto noDownmixForActiveTrack;
+        if (pDbp->isValid()) { // if constructor completed properly
+            pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
+            pTrack->downmixerBufferProvider = pDbp;
+            reconfigureBufferProviders(pTrack);
+            return NO_ERROR;
+        }
+        delete pDbp;
     }
-
-    if (EffectCreate(&sDwnmFxDesc.uuid,
-            pTrack->sessionId /*sessionId*/, -2 /*ioId not relevant here, using random value*/,
-            &pDbp->mDownmixHandle/*pHandle*/) != 0) {
-        ALOGE("prepareTrackForDownmix(%d) fails: error creating downmixer effect", trackName);
-        goto noDownmixForActiveTrack;
-    }
-
-    // channel input configuration will be overridden per-track
-    pDbp->mDownmixConfig.inputCfg.channels = pTrack->channelMask;
-    pDbp->mDownmixConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
-    pDbp->mDownmixConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
-    pDbp->mDownmixConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
-    pDbp->mDownmixConfig.inputCfg.samplingRate = pTrack->sampleRate;
-    pDbp->mDownmixConfig.outputCfg.samplingRate = pTrack->sampleRate;
-    pDbp->mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
-    pDbp->mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
-    // input and output buffer provider, and frame count will not be used as the downmix effect
-    // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
-    pDbp->mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
-            EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
-    pDbp->mDownmixConfig.outputCfg.mask = pDbp->mDownmixConfig.inputCfg.mask;
-
-    {// scope for local variables that are not used in goto label "noDownmixForActiveTrack"
-        int cmdStatus;
-        uint32_t replySize = sizeof(int);
-
-        // Configure and enable downmixer
-        status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle,
-                EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
-                &pDbp->mDownmixConfig /*pCmdData*/,
-                &replySize /*replySize*/, &cmdStatus /*pReplyData*/);
-        if ((status != 0) || (cmdStatus != 0)) {
-            ALOGE("error %d while configuring downmixer for track %d", status, trackName);
-            goto noDownmixForActiveTrack;
-        }
-        replySize = sizeof(int);
-        status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle,
-                EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
-                &replySize /*replySize*/, &cmdStatus /*pReplyData*/);
-        if ((status != 0) || (cmdStatus != 0)) {
-            ALOGE("error %d while enabling downmixer for track %d", status, trackName);
-            goto noDownmixForActiveTrack;
-        }
-
-        // Set downmix type
-        // parameter size rounded for padding on 32bit boundary
-        const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
-        const int downmixParamSize =
-                sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
-        effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
-        param->psize = sizeof(downmix_params_t);
-        const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
-        memcpy(param->data, &downmixParam, param->psize);
-        const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
-        param->vsize = sizeof(downmix_type_t);
-        memcpy(param->data + psizePadded, &downmixType, param->vsize);
-
-        status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle,
-                EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize/* cmdSize */,
-                param /*pCmndData*/, &replySize /*replySize*/, &cmdStatus /*pReplyData*/);
-
-        free(param);
-
-        if ((status != 0) || (cmdStatus != 0)) {
-            ALOGE("error %d while setting downmix type for track %d", status, trackName);
-            goto noDownmixForActiveTrack;
-        } else {
-            ALOGV("downmix type set to %d for track %d", (int) downmixType, trackName);
-        }
-    }// end of scope for local variables that are not used in goto label "noDownmixForActiveTrack"
-
-    // initialization successful:
-    pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // 16 bit input is required for downmix
-    pTrack->downmixerBufferProvider = pDbp;
-    reconfigureBufferProviders(pTrack);
-    return NO_ERROR;
-
-noDownmixForActiveTrack:
-    delete pDbp;
     pTrack->downmixerBufferProvider = NULL;
     reconfigureBufferProviders(pTrack);
     return NO_INIT;
@@ -521,7 +599,8 @@
     if (pTrack->mFormat != pTrack->mMixerInFormat) {
         pTrack->mReformatBufferProvider = new ReformatBufferProvider(
                 audio_channel_count_from_out_mask(pTrack->channelMask),
-                pTrack->mFormat, pTrack->mMixerInFormat);
+                pTrack->mFormat, pTrack->mMixerInFormat,
+                kCopyBufferFrameCount);
         reconfigureBufferProviders(pTrack);
     }
     return NO_ERROR;
@@ -531,11 +610,11 @@
 {
     pTrack->bufferProvider = pTrack->mInputBufferProvider;
     if (pTrack->mReformatBufferProvider) {
-        pTrack->mReformatBufferProvider->mTrackBufferProvider = pTrack->bufferProvider;
+        pTrack->mReformatBufferProvider->setBufferProvider(pTrack->bufferProvider);
         pTrack->bufferProvider = pTrack->mReformatBufferProvider;
     }
     if (pTrack->downmixerBufferProvider) {
-        pTrack->downmixerBufferProvider->mTrackBufferProvider = pTrack->bufferProvider;
+        pTrack->downmixerBufferProvider->setBufferProvider(pTrack->bufferProvider);
         pTrack->bufferProvider = pTrack->downmixerBufferProvider;
     }
 }
@@ -1780,29 +1859,9 @@
 /*static*/ void AudioMixer::sInitRoutine()
 {
     LocalClock lc;
-    sLocalTimeFreq = lc.getLocalFreq();
+    sLocalTimeFreq = lc.getLocalFreq(); // for the resampler
 
-    // find multichannel downmix effect if we have to play multichannel content
-    uint32_t numEffects = 0;
-    int ret = EffectQueryNumberEffects(&numEffects);
-    if (ret != 0) {
-        ALOGE("AudioMixer() error %d querying number of effects", ret);
-        return;
-    }
-    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
-
-    for (uint32_t i = 0 ; i < numEffects ; i++) {
-        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
-            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
-            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
-                ALOGI("found effect \"%s\" from %s",
-                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
-                sIsMultichannelCapable = true;
-                break;
-            }
-        }
-    }
-    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
+    DownmixerBufferProvider::init(); // for the downmixer
 }
 
 template <int MIXTYPE, int NCHAN, bool USEFLOATVOL, bool ADJUSTVOL,
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index a9f4761..09a4d89 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -153,8 +153,7 @@
 
     struct state_t;
     struct track_t;
-    class DownmixerBufferProvider;
-    class ReformatBufferProvider;
+    class CopyBufferProvider;
 
     typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp,
                            int32_t* aux);
@@ -206,9 +205,9 @@
         int32_t*           auxBuffer;
 
         // 16-byte boundary
-        AudioBufferProvider*     mInputBufferProvider;    // 4 bytes
-        ReformatBufferProvider*  mReformatBufferProvider; // 4 bytes
-        DownmixerBufferProvider* downmixerBufferProvider; // 4 bytes
+        AudioBufferProvider*     mInputBufferProvider;    // externally provided buffer provider.
+        CopyBufferProvider*      mReformatBufferProvider; // provider wrapper for reformatting.
+        CopyBufferProvider*      downmixerBufferProvider; // wrapper for channel conversion.
 
         int32_t     sessionId;
 
@@ -253,48 +252,112 @@
         track_t         tracks[MAX_NUM_TRACKS] __attribute__((aligned(32)));
     };
 
-    // AudioBufferProvider that wraps a track AudioBufferProvider by a call to a downmix effect
-    class DownmixerBufferProvider : public AudioBufferProvider {
+    // Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider,
+    // and ReformatBufferProvider.
+    // It handles a private buffer for use in converting format or channel masks from the
+    // input data to a form acceptable by the mixer.
+    // TODO: Make a ResamplerBufferProvider when integers are entirely removed from the
+    // processing pipeline.
+    class CopyBufferProvider : public AudioBufferProvider {
     public:
-        virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
-        virtual void releaseBuffer(Buffer* buffer);
-        DownmixerBufferProvider();
-        virtual ~DownmixerBufferProvider();
+        // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes).
+        // If bufferFrameCount is 0, no private buffer is created and in-place modification of
+        // the upstream buffer provider's buffers is performed by copyFrames().
+        CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize,
+                size_t bufferFrameCount);
+        virtual ~CopyBufferProvider();
 
-        AudioBufferProvider* mTrackBufferProvider;
-        effect_handle_t    mDownmixHandle;
-        effect_config_t    mDownmixConfig;
-    };
-
-    // AudioBufferProvider wrapper that reformats track to acceptable mixer input type
-    class ReformatBufferProvider : public AudioBufferProvider {
-    public:
-        ReformatBufferProvider(int32_t channels,
-                audio_format_t inputFormat, audio_format_t outputFormat);
-        virtual ~ReformatBufferProvider();
-
-        // overrides AudioBufferProvider methods
+        // Overrides AudioBufferProvider methods
         virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
         virtual void releaseBuffer(Buffer* buffer);
 
-        void reset();
-        inline bool requiresInternalBuffers() {
-            return true; //mInputFrameSize < mOutputFrameSize;
+        // Other public methods
+
+        // call this to release the buffer to the upstream provider.
+        // treat it as an audio discontinuity for future samples.
+        virtual void reset();
+
+        // this function should be supplied by the derived class.  It converts
+        // #frames in the *src pointer to the *dst pointer.  It is public because
+        // some providers will allow this to work on arbitrary buffers outside
+        // of the internal buffers.
+        virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
+
+        // set the upstream buffer provider. Consider calling "reset" before this function.
+        void setBufferProvider(AudioBufferProvider *p) {
+            mTrackBufferProvider = p;
         }
 
+    protected:
         AudioBufferProvider* mTrackBufferProvider;
-        int32_t              mChannels;
-        audio_format_t       mInputFormat;
-        audio_format_t       mOutputFormat;
-        size_t               mInputFrameSize;
-        size_t               mOutputFrameSize;
-        // (only) required for reformatting to a larger size.
+        const size_t         mInputFrameSize;
+        const size_t         mOutputFrameSize;
+    private:
         AudioBufferProvider::Buffer mBuffer;
-        void*                mOutputData;
-        size_t               mOutputCount;
+        const size_t         mLocalBufferFrameCount;
+        void*                mLocalBufferData;
         size_t               mConsumed;
     };
 
+    // DownmixerBufferProvider wraps a track AudioBufferProvider to provide
+    // position dependent downmixing by an Audio Effect.
+    class DownmixerBufferProvider : public CopyBufferProvider {
+    public:
+        DownmixerBufferProvider(audio_channel_mask_t inputChannelMask,
+                audio_channel_mask_t outputChannelMask, audio_format_t format,
+                uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount);
+        virtual ~DownmixerBufferProvider();
+        virtual void copyFrames(void *dst, const void *src, size_t frames);
+        bool isValid() const { return mDownmixHandle != NULL; }
+
+        static status_t init();
+        static bool isMultichannelCapable() { return sIsMultichannelCapable; }
+
+    protected:
+        effect_handle_t    mDownmixHandle;
+        effect_config_t    mDownmixConfig;
+
+        // effect descriptor for the downmixer used by the mixer
+        static effect_descriptor_t sDwnmFxDesc;
+        // indicates whether a downmix effect has been found and is usable by this mixer
+        static bool                sIsMultichannelCapable;
+        // FIXME: should we allow effects outside of the framework?
+        // We need to here. A special ioId that must be <= -2 so it does not map to a session.
+        static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
+    };
+
+    // RemixBufferProvider wraps a track AudioBufferProvider to perform an
+    // upmix or downmix to the proper channel count and mask.
+    class RemixBufferProvider : public CopyBufferProvider {
+    public:
+        RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+                audio_channel_mask_t outputChannelMask, audio_format_t format,
+                size_t bufferFrameCount);
+        virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+    protected:
+        const audio_format_t mFormat;
+        const size_t         mSampleSize;
+        const size_t         mInputChannels;
+        const size_t         mOutputChannels;
+        int8_t               mIdxAry[sizeof(uint32_t)*8]; // 32 bits => channel indices
+    };
+
+    // ReformatBufferProvider wraps a track AudioBufferProvider to convert the input data
+    // to an acceptable mixer input format type.
+    class ReformatBufferProvider : public CopyBufferProvider {
+    public:
+        ReformatBufferProvider(int32_t channels,
+                audio_format_t inputFormat, audio_format_t outputFormat,
+                size_t bufferFrameCount);
+        virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+    protected:
+        const int32_t        mChannels;
+        const audio_format_t mInputFormat;
+        const audio_format_t mOutputFormat;
+    };
+
     // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc.
     uint32_t        mTrackNames;
 
@@ -310,11 +373,6 @@
 private:
     state_t         mState __attribute__((aligned(32)));
 
-    // effect descriptor for the downmixer used by the mixer
-    static effect_descriptor_t sDwnmFxDesc;
-    // indicates whether a downmix effect has been found and is usable by this mixer
-    static bool                sIsMultichannelCapable;
-
     // Call after changing either the enabled status of a track, or parameters of an enabled track.
     // OK to call more often than that, but unnecessary.
     void invalidateState(uint32_t mask);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
old mode 100755
new mode 100644
index decb985..e0b664b
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3643,7 +3643,7 @@
             memset(mEffectBuffer, 0, mEffectBufferSize);
         }
         // FIXME as a performance optimization, should remember previous zero status
-        memset(mSinkBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
+        memset(mSinkBuffer, 0, mNormalFrameCount * mFrameSize);
     }
 
     // if any fast tracks, then status is ready
@@ -4783,7 +4783,7 @@
     , mPipeFramesP2(0)
     // mPipeMemory
     // mFastCaptureNBLogWriter
-    , mFastTrackAvail(true)
+    , mFastTrackAvail(false)
 {
     snprintf(mName, kNameLength, "AudioIn_%X", id);
     mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
@@ -4895,6 +4895,7 @@
         // FIXME
 #endif
 
+        mFastTrackAvail = true;
     }
 failed: ;
 
@@ -5234,10 +5235,10 @@
                         if (mChannelCount == activeTrack->mChannelCount) {
                             memcpy(dst, src, part1 * mFrameSize);
                         } else if (mChannelCount == 1) {
-                            upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, (int16_t *)src,
+                            upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, (const int16_t *)src,
                                     part1);
                         } else {
-                            downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, (int16_t *)src,
+                            downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, (const int16_t *)src,
                                     part1);
                         }
                         dst += part1 * activeTrack->mFrameSize;
@@ -5309,7 +5310,7 @@
                         // the resampler always outputs stereo samples:
                         // do post stereo to mono conversion
                         downmix_to_mono_i16_from_stereo_i16(activeTrack->mSink.i16,
-                                (int16_t *)activeTrack->mRsmpOutBuffer, framesOut);
+                                (const int16_t *)activeTrack->mRsmpOutBuffer, framesOut);
                     } else {
                         ditherAndClamp((int32_t *)activeTrack->mSink.raw,
                                 activeTrack->mRsmpOutBuffer, framesOut);
@@ -5457,21 +5458,14 @@
     // client expresses a preference for FAST, but we get the final say
     if (*flags & IAudioFlinger::TRACK_FAST) {
       if (
-            // use case: callback handler and frame count is default or at least as large as HAL
-            (
-                (tid != -1) &&
-                ((frameCount == 0) /*||
-                // FIXME must be equal to pipe depth, so don't allow it to be specified by client
-                // FIXME not necessarily true, should be native frame count for native SR!
-                (frameCount >= mFrameCount)*/)
-            ) &&
+            // use case: callback handler
+            (tid != -1) &&
+            // frame count is not specified, or is exactly the pipe depth
+            ((frameCount == 0) || (frameCount == mPipeFramesP2)) &&
             // PCM data
             audio_is_linear_pcm(format) &&
             // native format
             (format == mFormat) &&
-            // mono or stereo
-            ( (channelMask == AUDIO_CHANNEL_IN_MONO) ||
-              (channelMask == AUDIO_CHANNEL_IN_STEREO) ) &&
             // native channel mask
             (channelMask == mChannelMask) &&
             // native hardware sample rate
@@ -5481,40 +5475,43 @@
             // there are sufficient fast track slots available
             mFastTrackAvail
         ) {
-        // if frameCount not specified, then it defaults to pipe frame count
-        if (frameCount == 0) {
-            frameCount = mPipeFramesP2;
-        }
-        ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
+        ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%u mFrameCount=%u",
                 frameCount, mFrameCount);
       } else {
-        ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%d "
-                "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
+        ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%u mFrameCount=%u mPipeFramesP2=%u "
+                "format=%#x isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
                 "hasFastCapture=%d tid=%d mFastTrackAvail=%d",
-                frameCount, mFrameCount, format,
-                audio_is_linear_pcm(format),
-                channelMask, sampleRate, mSampleRate, hasFastCapture(), tid, mFastTrackAvail);
+                frameCount, mFrameCount, mPipeFramesP2,
+                format, audio_is_linear_pcm(format), channelMask, sampleRate, mSampleRate,
+                hasFastCapture(), tid, mFastTrackAvail);
         *flags &= ~IAudioFlinger::TRACK_FAST;
-        // FIXME It's not clear that we need to enforce this any more, since we have a pipe.
-        // For compatibility with AudioRecord calculation, buffer depth is forced
-        // to be at least 2 x the record thread frame count and cover audio hardware latency.
-        // This is probably too conservative, but legacy application code may depend on it.
-        // If you change this calculation, also review the start threshold which is related.
-        // FIXME It's not clear how input latency actually matters.  Perhaps this should be 0.
-        uint32_t latencyMs = 50; // FIXME mInput->stream->get_latency(mInput->stream);
-        size_t mNormalFrameCount = 2048; // FIXME
-        uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
-        if (minBufCount < 2) {
-            minBufCount = 2;
-        }
-        size_t minFrameCount = mNormalFrameCount * minBufCount;
+      }
+    }
+
+    // compute track buffer size in frames, and suggest the notification frame count
+    if (*flags & IAudioFlinger::TRACK_FAST) {
+        // fast track: frame count is exactly the pipe depth
+        frameCount = mPipeFramesP2;
+        // ignore requested notificationFrames, and always notify exactly once every HAL buffer
+        *notificationFrames = mFrameCount;
+    } else {
+        // not fast track: frame count is at least 2 HAL buffers and at least 20 ms
+        size_t minFrameCount = ((int64_t) mFrameCount * 2 * sampleRate + mSampleRate - 1) /
+                mSampleRate;
         if (frameCount < minFrameCount) {
             frameCount = minFrameCount;
         }
-      }
+        minFrameCount = (sampleRate * 20 / 1000 + 1) & ~1;
+        if (frameCount < minFrameCount) {
+            frameCount = minFrameCount;
+        }
+        // notification is forced to be at least double-buffering
+        size_t maxNotification = frameCount / 2;
+        if (*notificationFrames == 0 || *notificationFrames > maxNotification) {
+            *notificationFrames = maxNotification;
+        }
     }
     *pFrameCount = frameCount;
-    *notificationFrames = 0;    // FIXME implement
 
     lStatus = initCheck();
     if (lStatus != NO_ERROR) {
@@ -5745,6 +5742,7 @@
     } else {
         dprintf(fd, "  No active record clients\n");
     }
+    dprintf(fd, "  Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
     dprintf(fd, "  Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
 
     dumpBase(fd, args);
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index cacb066..af761e4 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1630,12 +1630,11 @@
                 frameCount, mChannelMask);
         // since client and server are in the same process,
         // the buffer has the same virtual address on both sides
-        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize);
+        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
+                true /*clientInServer*/);
         mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
         mClientProxy->setSendLevel(0.0);
         mClientProxy->setSampleRate(sampleRate);
-        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
-                true /*clientInServer*/);
     } else {
         ALOGW("Error creating output track on thread %p", playbackThread);
     }
@@ -1987,12 +1986,12 @@
 
 /*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
 {
-    result.append("    Active Client Fmt Chn mask Session S   Server fCount Resampling\n");
+    result.append("    Active Client Fmt Chn mask Session S   Server fCount SRate\n");
 }
 
 void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size, bool active)
 {
-    snprintf(buffer, size, "    %6s %6u %3u %08X %7u %1d %08X %6zu %10d\n",
+    snprintf(buffer, size, "    %6s %6u %3u %08X %7u %1d %08X %6zu %5u\n",
             active ? "yes" : "no",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mFormat,
@@ -2001,7 +2000,7 @@
             mState,
             mCblk->mServer,
             mFrameCount,
-            mResampler != NULL);
+            mSampleRate);
 
 }
 
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/AudioPolicyClientImpl.cpp b/services/audiopolicy/AudioPolicyClientImpl.cpp
index c322d92..b5af089 100644
--- a/services/audiopolicy/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/AudioPolicyClientImpl.cpp
@@ -101,7 +101,8 @@
                               audio_devices_t *pDevices,
                               uint32_t *pSamplingRate,
                               audio_format_t *pFormat,
-                              audio_channel_mask_t *pChannelMask)
+                              audio_channel_mask_t *pChannelMask,
+                              audio_input_flags_t flags)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -109,7 +110,7 @@
         return 0;
     }
 
-    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
+    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask, flags);
 }
 
 status_t AudioPolicyService::AudioPolicyClient::closeInput(audio_io_handle_t input)
diff --git a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
index 53f3e2d..97e12cc 100644
--- a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
@@ -158,7 +158,8 @@
         return 0;
     }
 
-    return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
+    return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
+            AUDIO_INPUT_FLAG_FAST /*FIXME*/);
 }
 
 audio_io_handle_t aps_open_input_on_module(void *service __unused,
@@ -174,7 +175,8 @@
         return 0;
     }
 
-    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
+    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
+            AUDIO_INPUT_FLAG_FAST /*FIXME*/);
 }
 
 int aps_close_input(void *service __unused, audio_io_handle_t input)
diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/AudioPolicyEffects.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/AudioPolicyEffects.h b/services/audiopolicy/AudioPolicyEffects.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 33e4397..ed66e58 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -238,7 +238,8 @@
                                         audio_devices_t *pDevices,
                                         uint32_t *pSamplingRate,
                                         audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask) = 0;
+                                        audio_channel_mask_t *pChannelMask,
+                                        audio_input_flags_t flags) = 0;
     // closes an audio input
     virtual status_t closeInput(audio_io_handle_t input) = 0;
     //
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
old mode 100755
new mode 100644
index 5a13ac2..a41721f
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -214,7 +214,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int audioSession)
+                                    int audioSession,
+                                    audio_input_flags_t flags __unused)
 {
     if (mAudioPolicyManager == NULL) {
         return 0;
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
old mode 100755
new mode 100644
index 406988c..5ef02e5
--- a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
@@ -202,7 +202,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int audioSession)
+                                    int audioSession,
+                                    audio_input_flags_t flags __unused)
 {
     if (mpAudioPolicy == NULL) {
         return 0;
@@ -485,4 +486,17 @@
     return INVALID_OPERATION;
 }
 
+audio_io_handle_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr __unused,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
+{
+    //FIXME: temporary to fix build with USE_LEGACY_AUDIO_POLICY
+    audio_stream_type_t stream = AUDIO_STREAM_MUSIC;
+    return getOutput(stream, samplingRate, format, channelMask, flags, offloadInfo);
+}
+
+
 }; // namespace android
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 1b4796b..d9acb56 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -560,6 +560,13 @@
         forceVolumeReeval = true;
         mForceUse[usage] = config;
         break;
+    case AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO:
+        if (config != AUDIO_POLICY_FORCE_NONE &&
+            config != AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED) {
+            ALOGW("setForceUse() invalid config %d forHDMI_SYSTEM_AUDIO", config);
+        }
+        mForceUse[usage] = config;
+        break;
     default:
         ALOGW("setForceUse() invalid usage %d", usage);
         break;
@@ -1104,7 +1111,8 @@
                                     &inputDesc->mDevice,
                                     &inputDesc->mSamplingRate,
                                     &inputDesc->mFormat,
-                                    &inputDesc->mChannelMask);
+                                    &inputDesc->mChannelMask,
+                                    AUDIO_INPUT_FLAG_FAST /*FIXME*/);
 
     // only accept input with the exact requested set of parameters
     if (input == 0 ||
@@ -1529,6 +1537,9 @@
     result.append(buffer);
     snprintf(buffer, SIZE, " Force use for system %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM]);
     result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
+            mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO]);
+    result.append(buffer);
 
     snprintf(buffer, SIZE, " Available output devices:\n");
     result.append(buffer);
@@ -2322,7 +2333,8 @@
                                                     &inputDesc->mDevice,
                                                     &inputDesc->mSamplingRate,
                                                     &inputDesc->mFormat,
-                                                    &inputDesc->mChannelMask);
+                                                    &inputDesc->mChannelMask,
+                                                    AUDIO_INPUT_FLAG_FAST /*FIXME*/);
 
                 if (input != 0) {
                     for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
@@ -2888,7 +2900,8 @@
                                             &desc->mDevice,
                                             &desc->mSamplingRate,
                                             &desc->mFormat,
-                                            &desc->mChannelMask);
+                                            &desc->mChannelMask,
+                                            AUDIO_INPUT_FLAG_FAST /*FIXME*/);
 
             if (input != 0) {
                 if (!address.isEmpty()) {
@@ -3554,10 +3567,10 @@
         }
         int device3 = AUDIO_DEVICE_NONE;
         if (strategy == STRATEGY_MEDIA) {
-            // ARC, SPDIF and LINE can co-exist with others.
+            // ARC, SPDIF and AUX_LINE can co-exist with others.
             device3 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_HDMI_ARC;
             device3 |= (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPDIF);
-            device3 |= (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_LINE);
+            device3 |= (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_LINE);
         }
 
         device2 |= device3;
@@ -3565,6 +3578,13 @@
         // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
         device |= device2;
 
+        // If hdmi system audio mode is on, remove speaker out of output list.
+        if ((strategy == STRATEGY_MEDIA) &&
+            (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
+                AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
+            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+        }
+
         if (device) break;
         device = mDefaultOutputDevice->mDeviceType;
         if (device == AUDIO_DEVICE_NONE) {
@@ -5296,7 +5316,9 @@
 const audio_format_t AudioPolicyManager::AudioPort::sPcmFormatCompareTable[] = {
         AUDIO_FORMAT_DEFAULT,
         AUDIO_FORMAT_PCM_16_BIT,
+        AUDIO_FORMAT_PCM_8_24_BIT,
         AUDIO_FORMAT_PCM_24_BIT_PACKED,
+        AUDIO_FORMAT_PCM_32_BIT,
 };
 
 int AudioPolicyManager::AudioPort::compareFormats(audio_format_t format1,
diff --git a/services/audiopolicy/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h
old mode 100755
new mode 100644
index 380fd5e..08942ee
--- a/services/audiopolicy/AudioPolicyService.h
+++ b/services/audiopolicy/AudioPolicyService.h
@@ -86,10 +86,11 @@
                                 int session = 0);
     virtual void releaseOutput(audio_io_handle_t output);
     virtual audio_io_handle_t getInput(audio_source_t inputSource,
-                                    uint32_t samplingRate = 0,
-                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                    audio_channel_mask_t channelMask = 0,
-                                    int audioSession = 0);
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    int audioSession,
+                                    audio_input_flags_t flags);
     virtual status_t startInput(audio_io_handle_t input);
     virtual status_t stopInput(audio_io_handle_t input);
     virtual void releaseInput(audio_io_handle_t input);
@@ -388,7 +389,8 @@
                                             audio_devices_t *pDevices,
                                             uint32_t *pSamplingRate,
                                             audio_format_t *pFormat,
-                                            audio_channel_mask_t *pChannelMask);
+                                            audio_channel_mask_t *pChannelMask,
+                                            audio_input_flags_t flags);
         // closes an audio input
         virtual status_t closeInput(audio_io_handle_t input);
         //
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 3de5d90..312a78c 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -40,7 +40,12 @@
 
     {
         SharedParameters::Lock l(client->getParameters());
-        mUsePartialQuirk = l.mParameters.quirks.partialResults;
+
+        if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+            mUsePartialResult = (mNumPartialResults > 1);
+        } else {
+            mUsePartialResult = l.mParameters.quirks.partialResults;
+        }
 
         // Initialize starting 3A state
         m3aState.afTriggerId = l.mParameters.afTriggerCounter;
@@ -63,17 +68,21 @@
         return false;
     }
 
-    bool partialResult = false;
-    if (mUsePartialQuirk) {
-        camera_metadata_entry_t entry;
-        entry = frame.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
-        if (entry.count > 0 &&
-                entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
-            partialResult = true;
+    bool isPartialResult = false;
+    if (mUsePartialResult) {
+        if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+            isPartialResult = frame.mResultExtras.partialResultCount < mNumPartialResults;
+        } else {
+            camera_metadata_entry_t entry;
+            entry = frame.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+            if (entry.count > 0 &&
+                    entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+                isPartialResult = true;
+            }
         }
     }
 
-    if (!partialResult && processFaceDetect(frame.mMetadata, client) != OK) {
+    if (!isPartialResult && processFaceDetect(frame.mMetadata, client) != OK) {
         return false;
     }
 
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 4afca50..68cf55b 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -91,8 +91,8 @@
         }
     } m3aState;
 
-    // Whether the partial result quirk is enabled for this device
-    bool mUsePartialQuirk;
+    // Whether the partial result is enabled for this device
+    bool mUsePartialResult;
 
     // Track most recent frame number for which 3A notifications were sent for.
     // Used to filter against sending 3A notifications for the same frame
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index 79f75a5..ab61c44 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -94,14 +94,14 @@
     entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
     nsecs_t timestamp = entry.data.i64[0];
     if (entry.count == 0) {
-        ALOGE("%s: metadata doesn't have timestamp, skip this result");
+        ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__);
         return;
     }
     (void)timestamp;
 
     entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
     if (entry.count == 0) {
-        ALOGE("%s: metadata doesn't have frame number, skip this result");
+        ALOGE("%s: metadata doesn't have frame number, skip this result", __FUNCTION__);
         return;
     }
     int32_t frameNumber = entry.data.i32[0];
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index de42cee..b8611f8 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -23,6 +23,7 @@
 #include <utils/Trace.h>
 #include <gui/Surface.h>
 #include <camera/camera2/CaptureRequest.h>
+#include <camera/CameraUtils.h>
 
 #include "common/CameraDeviceBase.h"
 #include "api2/CameraDeviceClient.h"
@@ -656,91 +657,8 @@
 status_t CameraDeviceClient::getRotationTransformLocked(int32_t* transform) {
     ALOGV("%s: begin", __FUNCTION__);
 
-    if (transform == NULL) {
-        ALOGW("%s: null transform", __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    *transform = 0;
-
     const CameraMetadata& staticInfo = mDevice->info();
-    camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_SENSOR_ORIENTATION);
-    if (entry.count == 0) {
-        ALOGE("%s: Camera %d: Can't find android.sensor.orientation in "
-                "static metadata!", __FUNCTION__, mCameraId);
-        return INVALID_OPERATION;
-    }
-
-    camera_metadata_ro_entry_t entryFacing = staticInfo.find(ANDROID_LENS_FACING);
-    if (entry.count == 0) {
-        ALOGE("%s: Camera %d: Can't find android.lens.facing in "
-                "static metadata!", __FUNCTION__, mCameraId);
-        return INVALID_OPERATION;
-    }
-
-    int32_t& flags = *transform;
-
-    bool mirror = (entryFacing.data.u8[0] == ANDROID_LENS_FACING_FRONT);
-    int orientation = entry.data.i32[0];
-    if (!mirror) {
-        switch (orientation) {
-            case 0:
-                flags = 0;
-                break;
-            case 90:
-                flags = NATIVE_WINDOW_TRANSFORM_ROT_90;
-                break;
-            case 180:
-                flags = NATIVE_WINDOW_TRANSFORM_ROT_180;
-                break;
-            case 270:
-                flags = NATIVE_WINDOW_TRANSFORM_ROT_270;
-                break;
-            default:
-                ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
-                      __FUNCTION__, orientation);
-                return INVALID_OPERATION;
-        }
-    } else {
-        switch (orientation) {
-            case 0:
-                flags = HAL_TRANSFORM_FLIP_H;
-                break;
-            case 90:
-                flags = HAL_TRANSFORM_FLIP_H | HAL_TRANSFORM_ROT_90;
-                break;
-            case 180:
-                flags = HAL_TRANSFORM_FLIP_V;
-                break;
-            case 270:
-                flags = HAL_TRANSFORM_FLIP_V | HAL_TRANSFORM_ROT_90;
-                break;
-            default:
-                ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
-                      __FUNCTION__, orientation);
-                return INVALID_OPERATION;
-        }
-
-    }
-
-    /**
-     * This magic flag makes surfaceflinger un-rotate the buffers
-     * to counter the extra global device UI rotation whenever the user
-     * physically rotates the device.
-     *
-     * By doing this, the camera buffer always ends up aligned
-     * with the physical camera for a "see through" effect.
-     *
-     * In essence, the buffer only gets rotated during preview use-cases.
-     * The user is still responsible to re-create streams of the proper
-     * aspect ratio, or the preview will end up looking non-uniformly
-     * stretched.
-     */
-    flags |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
-
-    ALOGV("%s: final transform = 0x%x", __FUNCTION__, flags);
-
-    return OK;
+    return CameraUtils::getRotationTransform(staticInfo, transform);
 }
 
 } // namespace android
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index c7bd886..037695d 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -252,6 +252,10 @@
      */
     virtual status_t flush(int64_t *lastFrameNumber = NULL) = 0;
 
+    /**
+     * Get the HAL device version.
+     */
+    virtual uint32_t getDeviceVersion() = 0;
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index 482f687..29eb78f 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -29,7 +29,17 @@
 
 FrameProcessorBase::FrameProcessorBase(wp<CameraDeviceBase> device) :
     Thread(/*canCallJava*/false),
-    mDevice(device) {
+    mDevice(device),
+    mNumPartialResults(1) {
+    sp<CameraDeviceBase> cameraDevice = device.promote();
+    if (cameraDevice != 0 &&
+            cameraDevice->getDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+        CameraMetadata staticInfo = cameraDevice->info();
+        camera_metadata_entry_t entry = staticInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+        if (entry.count > 0) {
+            mNumPartialResults = entry.data.i32[0];
+        }
+    }
 }
 
 FrameProcessorBase::~FrameProcessorBase() {
@@ -160,14 +170,18 @@
 
     camera_metadata_ro_entry_t entry;
 
-    // Quirks: Don't deliver partial results to listeners that don't want them
-    bool quirkIsPartial = false;
-    entry = result.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
-    if (entry.count != 0 &&
-            entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
-        ALOGV("%s: Camera %d: Not forwarding partial result to listeners",
-                __FUNCTION__, device->getId());
-        quirkIsPartial = true;
+    // Check if this result is partial.
+    bool isPartialResult = false;
+    if (device->getDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+        isPartialResult = result.mResultExtras.partialResultCount < mNumPartialResults;
+    } else {
+        entry = result.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+        if (entry.count != 0 &&
+                entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+            ALOGV("%s: Camera %d: This is a partial result",
+                    __FUNCTION__, device->getId());
+            isPartialResult = true;
+        }
     }
 
     // TODO: instead of getting requestID from CameraMetadata, we should get it
@@ -186,9 +200,10 @@
         Mutex::Autolock l(mInputMutex);
 
         List<RangeListener>::iterator item = mRangeListeners.begin();
+        // Don't deliver partial results to listeners that don't want them
         while (item != mRangeListeners.end()) {
             if (requestId >= item->minId && requestId < item->maxId &&
-                    (!quirkIsPartial || item->sendPartials)) {
+                    (!isPartialResult || item->sendPartials)) {
                 sp<FilteredListener> listener = item->listener.promote();
                 if (listener == 0) {
                     item = mRangeListeners.erase(item);
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.h b/services/camera/libcameraservice/common/FrameProcessorBase.h
index 3649c45..a618d84 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.h
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.h
@@ -71,6 +71,9 @@
     };
     List<RangeListener> mRangeListeners;
 
+    // Number of partial results the HAL will potentially send.
+    int32_t mNumPartialResults;
+
     void processNewFrames(const sp<CameraDeviceBase> &device);
 
     virtual bool processSingleFrame(CaptureResult &result,
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index c33c166..89c6b10 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -123,6 +123,7 @@
 
     mDeviceInfo = info.static_camera_characteristics;
     mHal2Device = device;
+    mDeviceVersion = device->common.version;
 
     return OK;
 }
@@ -589,6 +590,11 @@
     return waitUntilDrained();
 }
 
+uint32_t Camera2Device::getDeviceVersion() {
+    ATRACE_CALL();
+    return mDeviceVersion;
+}
+
 /**
  * Camera2Device::MetadataQueue
  */
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index 22a13ac..46182f8 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -78,12 +78,16 @@
             buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
     // Flush implemented as just a wait
     virtual status_t flush(int64_t *lastFrameNumber = NULL);
+    virtual uint32_t getDeviceVersion();
+
   private:
     const int mId;
     camera2_device_t *mHal2Device;
 
     CameraMetadata mDeviceInfo;
 
+    uint32_t mDeviceVersion;
+
     /**
      * Queue class for both sending requests to a camera2 device, and for
      * receiving frames from a camera2 device.
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6ceb9d4..3004d3e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -57,7 +57,8 @@
         mId(id),
         mHal3Device(NULL),
         mStatus(STATUS_UNINITIALIZED),
-        mUsePartialResultQuirk(false),
+        mUsePartialResult(false),
+        mNumPartialResults(1),
         mNextResultFrameNumber(0),
         mNextShutterFrameNumber(0),
         mListener(NULL)
@@ -180,13 +181,20 @@
     mNeedConfig = true;
     mPauseStateNotify = false;
 
-    /** Check for quirks */
-
     // Will the HAL be sending in early partial result metadata?
-    camera_metadata_entry partialResultsQuirk =
-            mDeviceInfo.find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
-    if (partialResultsQuirk.count > 0 && partialResultsQuirk.data.u8[0] == 1) {
-        mUsePartialResultQuirk = true;
+    if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+        camera_metadata_entry partialResultsCount =
+                mDeviceInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+        if (partialResultsCount.count > 0) {
+            mNumPartialResults = partialResultsCount.data.i32[0];
+            mUsePartialResult = (mNumPartialResults > 1);
+        }
+    } else {
+        camera_metadata_entry partialResultsQuirk =
+                mDeviceInfo.find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
+        if (partialResultsQuirk.count > 0 && partialResultsQuirk.data.u8[0] == 1) {
+            mUsePartialResult = true;
+        }
     }
 
     return OK;
@@ -1267,6 +1275,12 @@
     return res;
 }
 
+uint32_t Camera3Device::getDeviceVersion() {
+    ATRACE_CALL();
+    Mutex::Autolock il(mInterfaceLock);
+    return mDeviceVersion;
+}
+
 /**
  * Methods called by subclasses
  */
@@ -1545,11 +1559,10 @@
 }
 
 /**
- * QUIRK(partial results)
  * Check if all 3A fields are ready, and send off a partial 3A-only result
  * to the output frame queue
  */
-bool Camera3Device::processPartial3AQuirk(
+bool Camera3Device::processPartial3AResult(
         uint32_t frameNumber,
         const CameraMetadata& partial, const CaptureResultExtras& resultExtras) {
 
@@ -1601,7 +1614,7 @@
     // In addition to the above fields, this means adding in
     //   android.request.frameCount
     //   android.request.requestId
-    //   android.quirks.partialResult
+    //   android.quirks.partialResult (for HAL version below HAL3.2)
 
     const size_t kMinimal3AResultEntries = 10;
 
@@ -1627,10 +1640,12 @@
         return false;
     }
 
-    static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
-    if (!insert3AResult(min3AResult.mMetadata, ANDROID_QUIRKS_PARTIAL_RESULT,
-            &partialResult, frameNumber)) {
-        return false;
+    if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+        static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
+        if (!insert3AResult(min3AResult.mMetadata, ANDROID_QUIRKS_PARTIAL_RESULT,
+                &partialResult, frameNumber)) {
+            return false;
+        }
     }
 
     if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_MODE,
@@ -1668,6 +1683,9 @@
         return false;
     }
 
+    // We only send the aggregated partial when all 3A related metadata are available
+    // For both API1 and API2.
+    // TODO: we probably should pass through all partials to API2 unconditionally.
     mResultSignal.signal();
 
     return true;
@@ -1726,8 +1744,21 @@
                 frameNumber);
         return;
     }
-    bool partialResultQuirk = false;
-    CameraMetadata collectedQuirkResult;
+
+    // For HAL3.2 or above, if the HAL doesn't support partial results, it
+    // must always set partial_result to 1 when metadata is included in this result.
+    if (!mUsePartialResult &&
+            mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2 &&
+            result->result != NULL &&
+            result->partial_result != 1) {
+        SET_ERR("Result is malformed for frame %d: partial_result %u must be 1"
+                " if partial result is not supported",
+                frameNumber, result->partial_result);
+        return;
+    }
+
+    bool isPartialResult = false;
+    CameraMetadata collectedPartialResult;
     CaptureResultExtras resultExtras;
     bool hasInputBufferInRequest = false;
 
@@ -1749,28 +1780,46 @@
                 ", burstId = %" PRId32,
                 __FUNCTION__, request.resultExtras.requestId, request.resultExtras.frameNumber,
                 request.resultExtras.burstId);
+        // Always update the partial count to the latest one. When framework aggregates adjacent
+        // partial results into one, the latest partial count will be used.
+        request.resultExtras.partialResultCount = result->partial_result;
 
         // Check if this result carries only partial metadata
-        if (mUsePartialResultQuirk && result->result != NULL) {
-            camera_metadata_ro_entry_t partialResultEntry;
-            res = find_camera_metadata_ro_entry(result->result,
-                    ANDROID_QUIRKS_PARTIAL_RESULT, &partialResultEntry);
-            if (res != NAME_NOT_FOUND &&
-                    partialResultEntry.count > 0 &&
-                    partialResultEntry.data.u8[0] ==
-                    ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
-                // A partial result. Flag this as such, and collect this
-                // set of metadata into the in-flight entry.
-                partialResultQuirk = true;
-                request.partialResultQuirk.collectedResult.append(
+        if (mUsePartialResult && result->result != NULL) {
+            if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+                if (result->partial_result > mNumPartialResults || result->partial_result < 1) {
+                    SET_ERR("Result is malformed for frame %d: partial_result %u must be  in"
+                            " the range of [1, %d] when metadata is included in the result",
+                            frameNumber, result->partial_result, mNumPartialResults);
+                    return;
+                }
+                isPartialResult = (result->partial_result < mNumPartialResults);
+                request.partialResult.collectedResult.append(
                     result->result);
-                request.partialResultQuirk.collectedResult.erase(
-                    ANDROID_QUIRKS_PARTIAL_RESULT);
+            } else {
+                camera_metadata_ro_entry_t partialResultEntry;
+                res = find_camera_metadata_ro_entry(result->result,
+                        ANDROID_QUIRKS_PARTIAL_RESULT, &partialResultEntry);
+                if (res != NAME_NOT_FOUND &&
+                        partialResultEntry.count > 0 &&
+                        partialResultEntry.data.u8[0] ==
+                        ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+                    // A partial result. Flag this as such, and collect this
+                    // set of metadata into the in-flight entry.
+                    isPartialResult = true;
+                    request.partialResult.collectedResult.append(
+                        result->result);
+                    request.partialResult.collectedResult.erase(
+                        ANDROID_QUIRKS_PARTIAL_RESULT);
+                }
+            }
+
+            if (isPartialResult) {
                 // Fire off a 3A-only result if possible
-                if (!request.partialResultQuirk.haveSent3A) {
-                    request.partialResultQuirk.haveSent3A =
-                            processPartial3AQuirk(frameNumber,
-                                    request.partialResultQuirk.collectedResult,
+                if (!request.partialResult.haveSent3A) {
+                    request.partialResult.haveSent3A =
+                            processPartial3AResult(frameNumber,
+                                    request.partialResult.collectedResult,
                                     request.resultExtras);
                 }
             }
@@ -1786,23 +1835,23 @@
          * - CAMERA3_MSG_SHUTTER (expected during normal operation)
          * - CAMERA3_MSG_ERROR (expected during flush)
          */
-        if (request.requestStatus == OK && timestamp == 0 && !partialResultQuirk) {
+        if (request.requestStatus == OK && timestamp == 0 && !isPartialResult) {
             SET_ERR("Called before shutter notify for frame %d",
                     frameNumber);
             return;
         }
 
         // Did we get the (final) result metadata for this capture?
-        if (result->result != NULL && !partialResultQuirk) {
+        if (result->result != NULL && !isPartialResult) {
             if (request.haveResultMetadata) {
                 SET_ERR("Called multiple times with metadata for frame %d",
                         frameNumber);
                 return;
             }
-            if (mUsePartialResultQuirk &&
-                    !request.partialResultQuirk.collectedResult.isEmpty()) {
-                collectedQuirkResult.acquire(
-                    request.partialResultQuirk.collectedResult);
+            if (mUsePartialResult &&
+                    !request.partialResult.collectedResult.isEmpty()) {
+                collectedPartialResult.acquire(
+                    request.partialResult.collectedResult);
             }
             request.haveResultMetadata = true;
         }
@@ -1842,7 +1891,7 @@
 
     // Process the result metadata, if provided
     bool gotResult = false;
-    if (result->result != NULL && !partialResultQuirk) {
+    if (result->result != NULL && !isPartialResult) {
         Mutex::Autolock l(mOutputLock);
 
         gotResult = true;
@@ -1871,8 +1920,8 @@
         }
 
         // Append any previous partials to form a complete result
-        if (mUsePartialResultQuirk && !collectedQuirkResult.isEmpty()) {
-            captureResult.mMetadata.append(collectedQuirkResult);
+        if (mUsePartialResult && !collectedPartialResult.isEmpty()) {
+            captureResult.mMetadata.append(collectedPartialResult);
         }
 
         captureResult.mMetadata.sort();
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index ea958b7..b1b0033 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -135,6 +135,8 @@
 
     virtual status_t flush(int64_t *lastFrameNumber = NULL);
 
+    virtual uint32_t getDeviceVersion();
+
     // Methods called by subclasses
     void             notifyStatus(bool idle); // updates from StatusTracker
 
@@ -168,7 +170,7 @@
 
     CameraMetadata             mDeviceInfo;
 
-    int                        mDeviceVersion;
+    uint32_t                   mDeviceVersion;
 
     enum Status {
         STATUS_ERROR,
@@ -199,8 +201,11 @@
     // Need to hold on to stream references until configure completes.
     Vector<sp<camera3::Camera3StreamInterface> > mDeletedStreams;
 
-    // Whether quirk ANDROID_QUIRKS_USE_PARTIAL_RESULT is enabled
-    bool                       mUsePartialResultQuirk;
+    // Whether the HAL will send partial result
+    bool                       mUsePartialResult;
+
+    // Number of partial results that will be delivered by the HAL.
+    uint32_t                   mNumPartialResults;
 
     /**** End scope for mLock ****/
 
@@ -507,17 +512,17 @@
         // If this request has any input buffer
         bool hasInputBuffer;
 
-        // Fields used by the partial result quirk only
-        struct PartialResultQuirkInFlight {
+        // Fields used by the partial result only
+        struct PartialResultInFlight {
             // Set by process_capture_result once 3A has been sent to clients
             bool    haveSent3A;
             // Result metadata collected so far, when partial results are in use
             CameraMetadata collectedResult;
 
-            PartialResultQuirkInFlight():
+            PartialResultInFlight():
                     haveSent3A(false) {
             }
-        } partialResultQuirk;
+        } partialResult;
 
         // Default constructor needed by KeyedVector
         InFlightRequest() :
@@ -564,11 +569,11 @@
             int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput);
 
     /**
-     * For the partial result quirk, check if all 3A state fields are available
+     * For the partial result, check if all 3A state fields are available
      * and if so, queue up 3A-only result to the client. Returns true if 3A
      * is sent.
      */
-    bool processPartial3AQuirk(uint32_t frameNumber,
+    bool processPartial3AResult(uint32_t frameNumber,
             const CameraMetadata& partial, const CaptureResultExtras& resultExtras);
 
     // Helpers for reading and writing 3A metadata into to/from partial results