Merge "Revert "Revert "sound trigger: added permission checks""" into lmp-dev
diff --git a/camera/CameraUtils.cpp b/camera/CameraUtils.cpp
index 3ff181d..1ff63ab 100644
--- a/camera/CameraUtils.cpp
+++ b/camera/CameraUtils.cpp
@@ -73,18 +73,25 @@
                 return INVALID_OPERATION;
         }
     } else {
+        // Front camera needs to be horizontally flipped for
+        // mirror-like behavior.
+        // Note: Flips are applied before rotates.
         switch (orientation) {
             case 0:
-                flags = HAL_TRANSFORM_FLIP_H;
+                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H;
                 break;
             case 90:
-                flags = HAL_TRANSFORM_FLIP_H | HAL_TRANSFORM_ROT_90;
+                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H |
+                        NATIVE_WINDOW_TRANSFORM_ROT_270;
                 break;
             case 180:
-                flags = HAL_TRANSFORM_FLIP_V;
+                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H |
+                        NATIVE_WINDOW_TRANSFORM_ROT_180;
                 break;
             case 270:
-                flags = HAL_TRANSFORM_FLIP_V | HAL_TRANSFORM_ROT_90;
+                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H |
+                        NATIVE_WINDOW_TRANSFORM_ROT_90;
+
                 break;
             default:
                 ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index c016e52..4e36160 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -37,6 +37,7 @@
     parcel->readInt32(&afTriggerId);
     parcel->readInt32(&precaptureTriggerId);
     parcel->readInt64(&frameNumber);
+    parcel->readInt32(&partialResultCount);
 
     return OK;
 }
@@ -52,6 +53,7 @@
     parcel->writeInt32(afTriggerId);
     parcel->writeInt32(precaptureTriggerId);
     parcel->writeInt64(frameNumber);
+    parcel->writeInt32(partialResultCount);
 
     return OK;
 }
diff --git a/include/camera/CaptureResult.h b/include/camera/CaptureResult.h
index 6e47a16..0be7d6f 100644
--- a/include/camera/CaptureResult.h
+++ b/include/camera/CaptureResult.h
@@ -53,6 +53,11 @@
     int64_t frameNumber;
 
     /**
+     * The partial result count (index) for this capture result.
+     */
+    int32_t partialResultCount;
+
+    /**
      * Constructor initializes object as invalid by setting requestId to be -1.
      */
     CaptureResultExtras()
@@ -60,7 +65,8 @@
           burstId(0),
           afTriggerId(0),
           precaptureTriggerId(0),
-          frameNumber(0) {
+          frameNumber(0),
+          partialResultCount(0) {
     }
 
     /**
diff --git a/include/media/AudioPolicyHelper.h b/include/media/AudioPolicyHelper.h
new file mode 100644
index 0000000..f4afd45
--- /dev/null
+++ b/include/media/AudioPolicyHelper.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef AUDIO_POLICY_HELPER_H_
+#define AUDIO_POLICY_HELPER_H_
+
+#include <system/audio.h>
+
+audio_stream_type_t audio_attributes_to_stream_type(const audio_attributes_t *attr)
+{
+    // flags to stream type mapping
+    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
+        return AUDIO_STREAM_ENFORCED_AUDIBLE;
+    }
+    if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
+        return AUDIO_STREAM_BLUETOOTH_SCO;
+    }
+
+    // usage to stream type mapping
+    switch (attr->usage) {
+    case AUDIO_USAGE_MEDIA:
+    case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+        return AUDIO_STREAM_MUSIC;
+    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+        return AUDIO_STREAM_SYSTEM;
+    case AUDIO_USAGE_VOICE_COMMUNICATION:
+        return AUDIO_STREAM_VOICE_CALL;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+        return AUDIO_STREAM_DTMF;
+
+    case AUDIO_USAGE_ALARM:
+        return AUDIO_STREAM_ALARM;
+    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+        return AUDIO_STREAM_RING;
+
+    case AUDIO_USAGE_NOTIFICATION:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+    case AUDIO_USAGE_NOTIFICATION_EVENT:
+        return AUDIO_STREAM_NOTIFICATION;
+
+    case AUDIO_USAGE_UNKNOWN:
+    default:
+        return AUDIO_STREAM_MUSIC;
+    }
+}
+
+#endif //AUDIO_POLICY_HELPER_H_
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index f9c7efd..4edc1bf 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -461,6 +461,7 @@
     // for notification APIs
     uint32_t                mNotificationFramesReq; // requested number of frames between each
                                                     // notification callback
+                                                    // as specified in constructor or set()
     uint32_t                mNotificationFramesAct; // actual number of frames between each
                                                     // notification callback
     bool                    mRefreshRemaining;      // processAudioBuffer() should refresh
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index e1aab41..cf34991 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -139,12 +139,12 @@
     // return the number of input frames lost by HAL implementation, or 0 if the handle is invalid
     static uint32_t getInputFramesLost(audio_io_handle_t ioHandle);
 
-    // Allocate a new audio session ID and return that new ID.
-    // If unable to contact AudioFlinger, returns AUDIO_SESSION_ALLOCATE instead.
-    // FIXME If AudioFlinger were to ever exhaust the session ID namespace,
-    //       this method could fail by returning either AUDIO_SESSION_ALLOCATE
-    //       or an unspecified existing session ID.
-    static int newAudioSessionId();
+    // Allocate a new unique ID for use as an audio session ID or I/O handle.
+    // If unable to contact AudioFlinger, returns AUDIO_UNIQUE_ID_ALLOCATE instead.
+    // FIXME If AudioFlinger were to ever exhaust the unique ID namespace,
+    //       this method could fail by returning either AUDIO_UNIQUE_ID_ALLOCATE
+    //       or an unspecified existing unique ID.
+    static audio_unique_id_t newAudioUniqueId();
 
     static void acquireAudioSessionId(int audioSession, pid_t pid);
     static void releaseAudioSessionId(int audioSession, pid_t pid);
@@ -234,11 +234,15 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int sessionId);
+                                    int sessionId,
+                                    audio_input_flags_t);
 
-    static status_t startInput(audio_io_handle_t input);
-    static status_t stopInput(audio_io_handle_t input);
-    static void releaseInput(audio_io_handle_t input);
+    static status_t startInput(audio_io_handle_t input,
+                               audio_session_t session);
+    static status_t stopInput(audio_io_handle_t input,
+                              audio_session_t session);
+    static void releaseInput(audio_io_handle_t input,
+                             audio_session_t session);
     static status_t initStreamVolume(audio_stream_type_t stream,
                                       int indexMin,
                                       int indexMax);
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index a8f4605..82ec09c 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -145,25 +145,26 @@
     virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
             audio_channel_mask_t channelMask) const = 0;
 
-    virtual audio_io_handle_t openOutput(audio_module_handle_t module,
-                                         audio_devices_t *pDevices,
-                                         uint32_t *pSamplingRate,
-                                         audio_format_t *pFormat,
-                                         audio_channel_mask_t *pChannelMask,
-                                         uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags,
-                                         const audio_offload_info_t *offloadInfo = NULL) = 0;
+    virtual status_t openOutput(audio_module_handle_t module,
+                                audio_io_handle_t *output,
+                                audio_config_t *config,
+                                audio_devices_t *devices,
+                                const String8& address,
+                                uint32_t *latencyMs,
+                                audio_output_flags_t flags) = 0;
     virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
                                     audio_io_handle_t output2) = 0;
     virtual status_t closeOutput(audio_io_handle_t output) = 0;
     virtual status_t suspendOutput(audio_io_handle_t output) = 0;
     virtual status_t restoreOutput(audio_io_handle_t output) = 0;
 
-    virtual audio_io_handle_t openInput(audio_module_handle_t module,
-                                        audio_devices_t *pDevices,
-                                        uint32_t *pSamplingRate,
-                                        audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask) = 0;
+    virtual status_t openInput(audio_module_handle_t module,
+                               audio_io_handle_t *input,
+                               audio_config_t *config,
+                               audio_devices_t *device,
+                               const String8& address,
+                               audio_source_t source,
+                               audio_input_flags_t flags) = 0;
     virtual status_t closeInput(audio_io_handle_t input) = 0;
 
     virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
@@ -175,7 +176,7 @@
 
     virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const = 0;
 
-    virtual int newAudioSessionId() = 0;
+    virtual audio_unique_id_t newAudioUniqueId() = 0;
 
     virtual void acquireAudioSessionId(int audioSession, pid_t pid) = 0;
     virtual void releaseAudioSessionId(int audioSession, pid_t pid) = 0;
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 959e4c3..abbda32 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -70,13 +70,17 @@
                                 int session = 0) = 0;
     virtual void releaseOutput(audio_io_handle_t output) = 0;
     virtual audio_io_handle_t getInput(audio_source_t inputSource,
-                                    uint32_t samplingRate = 0,
-                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                    audio_channel_mask_t channelMask = 0,
-                                    int audioSession = 0) = 0;
-    virtual status_t startInput(audio_io_handle_t input) = 0;
-    virtual status_t stopInput(audio_io_handle_t input) = 0;
-    virtual void releaseInput(audio_io_handle_t input) = 0;
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    int audioSession,
+                                    audio_input_flags_t flags) = 0;
+    virtual status_t startInput(audio_io_handle_t input,
+                                audio_session_t session) = 0;
+    virtual status_t stopInput(audio_io_handle_t input,
+                               audio_session_t session) = 0;
+    virtual void releaseInput(audio_io_handle_t input,
+                              audio_session_t session) = 0;
     virtual status_t initStreamVolume(audio_stream_type_t stream,
                                       int indexMin,
                                       int indexMax) = 0;
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index d202fbc..253c557 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -47,6 +47,14 @@
     CAMCORDER_QUALITY_TIME_LAPSE_QVGA = 1007,
     CAMCORDER_QUALITY_TIME_LAPSE_2160P = 1008,
     CAMCORDER_QUALITY_TIME_LAPSE_LIST_END = 1008,
+
+    CAMCORDER_QUALITY_HIGH_SPEED_LIST_START = 2000,
+    CAMCORDER_QUALITY_HIGH_SPEED_LOW  = 2000,
+    CAMCORDER_QUALITY_HIGH_SPEED_HIGH = 2001,
+    CAMCORDER_QUALITY_HIGH_SPEED_480P = 2002,
+    CAMCORDER_QUALITY_HIGH_SPEED_720P = 2003,
+    CAMCORDER_QUALITY_HIGH_SPEED_1080P = 2004,
+    CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2004,
 };
 
 /**
diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h
index 2dd78cc..5830475 100644
--- a/include/media/SoundPool.h
+++ b/include/media/SoundPool.h
@@ -167,7 +167,7 @@
     friend class SoundPoolThread;
     friend class SoundChannel;
 public:
-    SoundPool(int maxChannels, audio_stream_type_t streamType, int srcQuality);
+    SoundPool(int maxChannels, const audio_attributes_t* pAttributes);
     ~SoundPool();
     int load(const char* url, int priority);
     int load(int fd, int64_t offset, int64_t length, int priority);
@@ -183,8 +183,7 @@
     void setPriority(int channelID, int priority);
     void setLoop(int channelID, int loop);
     void setRate(int channelID, float rate);
-    audio_stream_type_t streamType() const { return mStreamType; }
-    int srcQuality() const { return mSrcQuality; }
+    const audio_attributes_t* attributes() { return &mAttributes; }
 
     // called from SoundPoolThread
     void sampleLoaded(int sampleID);
@@ -225,8 +224,7 @@
     List<SoundChannel*>     mStop;
     DefaultKeyedVector< int, sp<Sample> >   mSamples;
     int                     mMaxChannels;
-    audio_stream_type_t     mStreamType;
-    int                     mSrcQuality;
+    audio_attributes_t      mAttributes;
     int                     mAllocated;
     int                     mNextSampleID;
     int                     mNextChannelID;
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 142cb90..b0a62a7 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -61,12 +61,18 @@
     OUTPUT_FORMAT_AAC_ADIF = 5,
     OUTPUT_FORMAT_AAC_ADTS = 6,
 
+    OUTPUT_FORMAT_AUDIO_ONLY_END = 7, // Used in validating the output format.  Should be
+                                      //  at the end of the audio only output formats.
+
     /* Stream over a socket, limited to a single stream */
     OUTPUT_FORMAT_RTP_AVP = 7,
 
     /* H.264/AAC data encapsulated in MPEG2/TS */
     OUTPUT_FORMAT_MPEG2TS = 8,
 
+    /* VP8/VORBIS data in a WEBM container */
+    OUTPUT_FORMAT_WEBM = 9,
+
     OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
 };
 
@@ -77,6 +83,7 @@
     AUDIO_ENCODER_AAC = 3,
     AUDIO_ENCODER_HE_AAC = 4,
     AUDIO_ENCODER_AAC_ELD = 5,
+    AUDIO_ENCODER_VORBIS = 6,
 
     AUDIO_ENCODER_LIST_END // must be the last - used to validate the audio encoder type
 };
@@ -86,6 +93,7 @@
     VIDEO_ENCODER_H263 = 1,
     VIDEO_ENCODER_H264 = 2,
     VIDEO_ENCODER_MPEG_4_SP = 3,
+    VIDEO_ENCODER_VP8 = 4,
 
     VIDEO_ENCODER_LIST_END // must be the last - used to validate the video encoder type
 };
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 142b7cb..7e4a1d9 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -32,6 +32,7 @@
 
 struct ABuffer;
 struct MemoryDealer;
+struct DescribeColorFormatParams;
 
 struct ACodec : public AHierarchicalStateMachine, public CodecBase {
     ACodec();
@@ -189,6 +190,7 @@
 
     int32_t mEncoderDelay;
     int32_t mEncoderPadding;
+    int32_t mRotationDegrees;
 
     bool mChannelMaskPresent;
     int32_t mChannelMask;
@@ -236,7 +238,7 @@
     status_t setSupportedOutputFormat();
 
     status_t setupVideoDecoder(
-            const char *mime, int32_t width, int32_t height);
+            const char *mime, const sp<AMessage> &msg);
 
     status_t setupVideoEncoder(
             const char *mime, const sp<AMessage> &msg);
@@ -305,6 +307,8 @@
             OMX_ERRORTYPE error = OMX_ErrorUndefined,
             status_t internalError = UNKNOWN_ERROR);
 
+    static void describeDefaultColorFormat(DescribeColorFormatParams &describeParams);
+
     status_t requestIDRFrame();
     status_t setParameters(const sp<AMessage> &params);
 
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index 3ef6b9a..26ce5f9 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -63,8 +63,8 @@
     int32_t getTimeScale() const { return mTimeScale; }
 
     status_t setGeoData(int latitudex10000, int longitudex10000);
-    void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
-    int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
+    virtual void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
+    virtual int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
 
 protected:
     virtual ~MPEG4Writer();
diff --git a/include/media/stagefright/MediaBufferGroup.h b/include/media/stagefright/MediaBufferGroup.h
index 0488292..a006f7f 100644
--- a/include/media/stagefright/MediaBufferGroup.h
+++ b/include/media/stagefright/MediaBufferGroup.h
@@ -34,9 +34,12 @@
 
     void add_buffer(MediaBuffer *buffer);
 
-    // Blocks until a buffer is available and returns it to the caller,
-    // the returned buffer will have a reference count of 1.
-    status_t acquire_buffer(MediaBuffer **buffer);
+    // If nonBlocking is false, it blocks until a buffer is available and
+    // passes it to the caller in *buffer, while returning OK.
+    // The returned buffer will have a reference count of 1.
+    // If nonBlocking is true and a buffer is not immediately available,
+    // buffer is set to NULL and it returns WOULD_BLOCK.
+    status_t acquire_buffer(MediaBuffer **buffer, bool nonBlocking = false);
 
 protected:
     virtual void signalBufferReturned(MediaBuffer *buffer);
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 3f7508b..4ff0d62 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -30,6 +30,7 @@
 struct AString;
 struct CodecBase;
 struct ICrypto;
+struct IBatteryStats;
 struct SoftwareRenderer;
 struct Surface;
 
@@ -51,6 +52,8 @@
         CB_OUTPUT_FORMAT_CHANGED = 4,
     };
 
+    struct BatteryNotifier;
+
     static sp<MediaCodec> CreateByType(
             const sp<ALooper> &looper, const char *mime, bool encoder);
 
@@ -154,6 +157,7 @@
         STARTING,
         STARTED,
         FLUSHING,
+        FLUSHED,
         STOPPING,
         RELEASING,
     };
@@ -225,6 +229,9 @@
     sp<AMessage> mInputFormat;
     sp<AMessage> mCallback;
 
+    bool mBatteryStatNotified;
+    bool mIsVideo;
+
     // initial create parameters
     AString mInitName;
     bool mInitNameIsType;
@@ -294,6 +301,8 @@
     status_t onSetParameters(const sp<AMessage> &params);
 
     status_t amendOutputFormatWithCodecSpecificData(const sp<ABuffer> &buffer);
+    void updateBatteryStat();
+    bool isExecuting() const;
 
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodec);
 };
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index 01a5daf..c11fcc9 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -25,9 +25,12 @@
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/Vector.h>
+#include <utils/StrongPointer.h>
 
 namespace android {
 
+struct AMessage;
+
 struct MediaCodecList {
     static const MediaCodecList *getInstance();
 
@@ -51,15 +54,19 @@
             size_t index, const char *type,
             Vector<ProfileLevel> *profileLevels,
             Vector<uint32_t> *colorFormats,
-            uint32_t *flags) const;
+            uint32_t *flags,
+            // TODO default argument is only for compatibility with existing JNI
+            sp<AMessage> *capabilities = NULL) const;
 
 private:
     enum Section {
         SECTION_TOPLEVEL,
         SECTION_DECODERS,
         SECTION_DECODER,
+        SECTION_DECODER_TYPE,
         SECTION_ENCODERS,
         SECTION_ENCODER,
+        SECTION_ENCODER_TYPE,
         SECTION_INCLUDE,
     };
 
@@ -67,7 +74,10 @@
         AString mName;
         bool mIsEncoder;
         uint32_t mTypes;
+        uint32_t mSoleType;
         uint32_t mQuirks;
+        KeyedVector<uint32_t, sp<AMessage> > mCaps;
+        sp<AMessage> mCurrentCaps;
     };
 
     static MediaCodecList *sCodecList;
@@ -103,6 +113,8 @@
 
     status_t addQuirk(const char **attrs);
     status_t addTypeFromAttributes(const char **attrs);
+    status_t addLimit(const char **attrs);
+    status_t addFeature(const char **attrs);
     void addType(const char *name);
 
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodecList);
diff --git a/include/media/stagefright/MediaSource.h b/include/media/stagefright/MediaSource.h
index 204d1c6..a653db9 100644
--- a/include/media/stagefright/MediaSource.h
+++ b/include/media/stagefright/MediaSource.h
@@ -82,6 +82,10 @@
         void setLateBy(int64_t lateness_us);
         int64_t getLateBy() const;
 
+        void setNonBlocking();
+        void clearNonBlocking();
+        bool getNonBlocking() const;
+
     private:
         enum Options {
             kSeekTo_Option      = 1,
@@ -91,6 +95,7 @@
         int64_t mSeekTimeUs;
         SeekMode mSeekMode;
         int64_t mLatenessUs;
+        bool mNonBlocking;
     };
 
     // Causes this source to suspend pulling data from its upstream source
diff --git a/include/media/stagefright/MediaWriter.h b/include/media/stagefright/MediaWriter.h
index 5cc8dcf..e27ea1d 100644
--- a/include/media/stagefright/MediaWriter.h
+++ b/include/media/stagefright/MediaWriter.h
@@ -48,6 +48,9 @@
         return OK;
     }
 
+    virtual void setStartTimeOffsetMs(int ms) {}
+    virtual int32_t getStartTimeOffsetMs() const { return 0; }
+
 protected:
     virtual ~MediaWriter() {}
     int64_t mMaxFileSizeLimitBytes;
diff --git a/include/media/stagefright/foundation/ABase.h b/include/media/stagefright/foundation/ABase.h
index 9eceea3..949d49e 100644
--- a/include/media/stagefright/foundation/ABase.h
+++ b/include/media/stagefright/foundation/ABase.h
@@ -22,4 +22,31 @@
     name(const name &); \
     name &operator=(const name &)
 
+/* Returns true if the size parameter is safe for new array allocation (32-bit)
+ *
+ * Example usage:
+ *
+ * if (!isSafeArraySize<uint32_t>(arraySize)) {
+ *     return BAD_VALUE;
+ * }
+ * ...
+ * uint32_t *myArray = new uint32_t[arraySize];
+ *
+ * There is a bug in gcc versions earlier than 4.8 where the new[] array allocation
+ * will overflow in the internal 32 bit heap allocation, resulting in an
+ * underallocated array. This is a security issue that allows potential overwriting
+ * of other heap data.
+ *
+ * An alternative to checking is to create a safe new array template function which
+ * either throws a std::bad_alloc exception or returns NULL/nullptr_t; NULL considered
+ * safe since normal access of NULL throws an exception.
+ *
+ * https://securityblog.redhat.com/2012/10/31/array-allocation-in-cxx/
+ */
+template <typename T, typename S>
+bool isSafeArraySize(S size) {
+    return size >= 0                            // in case S is signed, ignored if not.
+            && size <= 0xffffffff / sizeof(T);  // max-unsigned-32-bit-int / element-size.
+}
+
 #endif  // A_BASE_H_
diff --git a/include/media/stagefright/foundation/ABuffer.h b/include/media/stagefright/foundation/ABuffer.h
index 28f0aed..602f7ab 100644
--- a/include/media/stagefright/foundation/ABuffer.h
+++ b/include/media/stagefright/foundation/ABuffer.h
@@ -42,6 +42,9 @@
 
     void setRange(size_t offset, size_t size);
 
+    // create buffer from dup of some memory block
+    static sp<ABuffer> CreateAsCopy(const void *data, size_t capacity);
+
     void setInt32Data(int32_t data) { mInt32Data = data; }
     int32_t int32Data() const { return mInt32Data; }
 
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index 7e823eb..5846d6b 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -50,6 +50,7 @@
     void setDouble(const char *name, double value);
     void setPointer(const char *name, void *value);
     void setString(const char *name, const char *s, ssize_t len = -1);
+    void setString(const char *name, const AString &s);
     void setObject(const char *name, const sp<RefBase> &obj);
     void setBuffer(const char *name, const sp<ABuffer> &buffer);
     void setMessage(const char *name, const sp<AMessage> &obj);
@@ -58,6 +59,8 @@
             const char *name,
             int32_t left, int32_t top, int32_t right, int32_t bottom);
 
+    bool contains(const char *name) const;
+
     bool findInt32(const char *name, int32_t *value) const;
     bool findInt64(const char *name, int64_t *value) const;
     bool findSize(const char *name, size_t *value) const;
diff --git a/include/media/stagefright/foundation/AString.h b/include/media/stagefright/foundation/AString.h
index 0edaa1c..4be3c6d 100644
--- a/include/media/stagefright/foundation/AString.h
+++ b/include/media/stagefright/foundation/AString.h
@@ -70,6 +70,9 @@
     size_t hash() const;
 
     bool operator==(const AString &other) const;
+    bool operator!=(const AString &other) const {
+        return !operator==(other);
+    }
     bool operator<(const AString &other) const;
     bool operator>(const AString &other) const;
 
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 5116d1e..fa1b20a 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -175,12 +175,11 @@
 
 // Proxy seen by AudioTrack client and AudioRecord client
 class ClientProxy : public Proxy {
-protected:
+public:
     ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize,
             bool isOut, bool clientInServer);
     virtual ~ClientProxy() { }
 
-public:
     static const struct timespec kForever;
     static const struct timespec kNonBlocking;
 
@@ -394,8 +393,10 @@
 class AudioTrackServerProxy : public ServerProxy {
 public:
     AudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
-            size_t frameSize, bool clientInServer = false)
-        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer) { }
+            size_t frameSize, bool clientInServer = false, uint32_t sampleRate = 0)
+        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer) {
+        mCblk->mSampleRate = sampleRate;
+    }
 protected:
     virtual ~AudioTrackServerProxy() { }
 
@@ -458,9 +459,8 @@
 class AudioRecordServerProxy : public ServerProxy {
 public:
     AudioRecordServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
-            size_t frameSize)
-        : ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/,
-            false /*clientInServer*/) { }
+            size_t frameSize, bool clientInServer)
+        : ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/, clientInServer) { }
 protected:
     virtual ~AudioRecordServerProxy() { }
 };
diff --git a/media/img_utils/src/TiffEntry.cpp b/media/img_utils/src/TiffEntry.cpp
index 9cea721..1b20e36 100644
--- a/media/img_utils/src/TiffEntry.cpp
+++ b/media/img_utils/src/TiffEntry.cpp
@@ -203,14 +203,20 @@
             }
             break;
         }
-        case FLOAT:
-        case DOUBLE: {
+        case FLOAT: {
             const float* typed_data = getData<float>();
             for (size_t i = 0; i < cappedCount; ++i) {
                 output.appendFormat("%f ", typed_data[i]);
             }
             break;
         }
+        case DOUBLE: {
+            const double* typed_data = getData<double>();
+            for (size_t i = 0; i < cappedCount; ++i) {
+                output.appendFormat("%f ", typed_data[i]);
+            }
+            break;
+        }
         default: {
             output.append("unknown type ");
             break;
diff --git a/media/img_utils/src/TiffWriter.cpp b/media/img_utils/src/TiffWriter.cpp
index d85289e..ac41734 100644
--- a/media/img_utils/src/TiffWriter.cpp
+++ b/media/img_utils/src/TiffWriter.cpp
@@ -66,10 +66,6 @@
         return BAD_VALUE;
     }
 
-    if (LOG_NDEBUG == 0) {
-        log();
-    }
-
     uint32_t totalSize = getTotalSize();
 
     KeyedVector<uint32_t, uint32_t> offsetVector;
@@ -104,7 +100,9 @@
         ifd = ifd->getNextIfd();
     }
 
-    log();
+    if (LOG_NDEBUG == 0) {
+        log();
+    }
 
     for (size_t i = 0; i < offVecSize; ++i) {
         uint32_t ifdKey = offsetVector.keyAt(i);
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 3ee5809..9e7ba88 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -211,10 +211,10 @@
     mReqFrameCount = frameCount;
 
     mNotificationFramesReq = notificationFrames;
-    mNotificationFramesAct = 0;
+    // mNotificationFramesAct is initialized in openRecord_l
 
     if (sessionId == AUDIO_SESSION_ALLOCATE) {
-        mSessionId = AudioSystem::newAudioSessionId();
+        mSessionId = AudioSystem::newAudioUniqueId();
     } else {
         mSessionId = sessionId;
     }
@@ -444,60 +444,25 @@
         }
     }
 
-    // FIXME Assume double buffering, because we don't know the true HAL sample rate
-    const uint32_t nBuffering = 2;
-
-    mNotificationFramesAct = mNotificationFramesReq;
-    size_t frameCount = mReqFrameCount;
-
-    if (!(mFlags & AUDIO_INPUT_FLAG_FAST)) {
-        // validate framecount
-        // If fast track was not requested, this preserves
-        // the old behavior of validating on client side.
-        // FIXME Eventually the validation should be done on server side
-        // regardless of whether it's a fast or normal track.  It's debatable
-        // whether to account for the input latency to provision buffers appropriately.
-        size_t minFrameCount;
-        status = AudioRecord::getMinFrameCount(&minFrameCount,
-                mSampleRate, mFormat, mChannelMask);
-        if (status != NO_ERROR) {
-            ALOGE("getMinFrameCount() failed for sampleRate %u, format %#x, channelMask %#x; "
-                    "status %d",
-                    mSampleRate, mFormat, mChannelMask, status);
-            return status;
-        }
-
-        if (frameCount == 0) {
-            frameCount = minFrameCount;
-        } else if (frameCount < minFrameCount) {
-            ALOGE("frameCount %zu < minFrameCount %zu", frameCount, minFrameCount);
-            return BAD_VALUE;
-        }
-
-        // Make sure that application is notified with sufficient margin before overrun
-        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
-            mNotificationFramesAct = frameCount/2;
-        }
-    }
-
     audio_io_handle_t input = AudioSystem::getInput(mInputSource, mSampleRate, mFormat,
-            mChannelMask, mSessionId);
+            mChannelMask, mSessionId, mFlags);
     if (input == AUDIO_IO_HANDLE_NONE) {
         ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
-              "channel mask %#x, session %d",
-              mInputSource, mSampleRate, mFormat, mChannelMask, mSessionId);
+              "channel mask %#x, session %d, flags %#x",
+              mInputSource, mSampleRate, mFormat, mChannelMask, mSessionId, mFlags);
         return BAD_VALUE;
     }
     {
     // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
     // we must release it ourselves if anything goes wrong.
 
+    size_t frameCount = mReqFrameCount;
     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                 // but we will still need the original value also
     int originalSessionId = mSessionId;
 
     // The notification frame count is the period between callbacks, as suggested by the server.
-    size_t notificationFrames;
+    size_t notificationFrames = mNotificationFramesReq;
 
     sp<IMemory> iMem;           // for cblk
     sp<IMemory> bufferMem;
@@ -576,14 +541,14 @@
             // once denied, do not request again if IAudioRecord is re-created
             mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
         }
-        // Theoretically double-buffering is not required for fast tracks,
-        // due to tighter scheduling.  But in practice, to accomodate kernels with
-        // scheduling jitter, and apps with computation jitter, we use double-buffering.
-        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
-            mNotificationFramesAct = frameCount/nBuffering;
-        }
     }
 
+    // Make sure that application is notified with sufficient margin before overrun
+    if (notificationFrames == 0 || notificationFrames > frameCount) {
+        ALOGW("Received notificationFrames %zu for frameCount %zu", notificationFrames, frameCount);
+    }
+    mNotificationFramesAct = notificationFrames;
+
     // We retain a copy of the I/O handle, but don't own the reference
     mInput = input;
     mRefreshRemaining = true;
@@ -607,7 +572,7 @@
     }
 
 release:
-    AudioSystem::releaseInput(input);
+    AudioSystem::releaseInput(input, (audio_session_t)mSessionId);
     if (status == NO_ERROR) {
         status = NO_INIT;
     }
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index a47d45c..365a594 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -427,11 +427,11 @@
     return result;
 }
 
-int AudioSystem::newAudioSessionId()
+audio_unique_id_t AudioSystem::newAudioUniqueId()
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
-    if (af == 0) return AUDIO_SESSION_ALLOCATE;
-    return af->newAudioSessionId();
+    if (af == 0) return AUDIO_UNIQUE_ID_ALLOCATE;
+    return af->newAudioUniqueId();
 }
 
 void AudioSystem::acquireAudioSessionId(int audioSession, pid_t pid)
@@ -688,32 +688,36 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int sessionId)
+                                    int sessionId,
+                                    audio_input_flags_t flags)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return 0;
-    return aps->getInput(inputSource, samplingRate, format, channelMask, sessionId);
+    return aps->getInput(inputSource, samplingRate, format, channelMask, sessionId, flags);
 }
 
-status_t AudioSystem::startInput(audio_io_handle_t input)
+status_t AudioSystem::startInput(audio_io_handle_t input,
+                                 audio_session_t session)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->startInput(input);
+    return aps->startInput(input, session);
 }
 
-status_t AudioSystem::stopInput(audio_io_handle_t input)
+status_t AudioSystem::stopInput(audio_io_handle_t input,
+                                audio_session_t session)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->stopInput(input);
+    return aps->stopInput(input, session);
 }
 
-void AudioSystem::releaseInput(audio_io_handle_t input)
+void AudioSystem::releaseInput(audio_io_handle_t input,
+                               audio_session_t session)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return;
-    aps->releaseInput(input);
+    aps->releaseInput(input, session);
 }
 
 status_t AudioSystem::initStreamVolume(audio_stream_type_t stream,
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 5cf42f7..5331fce 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -197,6 +197,7 @@
             lSessionId = *sessionId;
         }
         data.writeInt32(lSessionId);
+        data.writeInt64(notificationFrames != NULL ? *notificationFrames : 0);
         cblk.clear();
         buffers.clear();
         status_t lStatus = remote()->transact(OPEN_RECORD, data, &reply);
@@ -433,61 +434,40 @@
         return reply.readInt64();
     }
 
-    virtual audio_io_handle_t openOutput(audio_module_handle_t module,
-                                         audio_devices_t *pDevices,
-                                         uint32_t *pSamplingRate,
-                                         audio_format_t *pFormat,
-                                         audio_channel_mask_t *pChannelMask,
-                                         uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags,
-                                         const audio_offload_info_t *offloadInfo)
+    virtual status_t openOutput(audio_module_handle_t module,
+                                audio_io_handle_t *output,
+                                audio_config_t *config,
+                                audio_devices_t *devices,
+                                const String8& address,
+                                uint32_t *latencyMs,
+                                audio_output_flags_t flags)
     {
+        if (output == NULL || config == NULL || devices == NULL || latencyMs == NULL) {
+            return BAD_VALUE;
+        }
         Parcel data, reply;
-        audio_devices_t devices = pDevices != NULL ? *pDevices : AUDIO_DEVICE_NONE;
-        uint32_t samplingRate = pSamplingRate != NULL ? *pSamplingRate : 0;
-        audio_format_t format = pFormat != NULL ? *pFormat : AUDIO_FORMAT_DEFAULT;
-        audio_channel_mask_t channelMask = pChannelMask != NULL ?
-                *pChannelMask : (audio_channel_mask_t)0;
-        uint32_t latency = pLatencyMs != NULL ? *pLatencyMs : 0;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(module);
-        data.writeInt32(devices);
-        data.writeInt32(samplingRate);
-        data.writeInt32(format);
-        data.writeInt32(channelMask);
-        data.writeInt32(latency);
+        data.write(config, sizeof(audio_config_t));
+        data.writeInt32(*devices);
+        data.writeString8(address);
         data.writeInt32((int32_t) flags);
-        // hasOffloadInfo
-        if (offloadInfo == NULL) {
-            data.writeInt32(0);
-        } else {
-            data.writeInt32(1);
-            data.write(offloadInfo, sizeof(audio_offload_info_t));
+        status_t status = remote()->transact(OPEN_OUTPUT, data, &reply);
+        if (status != NO_ERROR) {
+            *output = AUDIO_IO_HANDLE_NONE;
+            return status;
         }
-        remote()->transact(OPEN_OUTPUT, data, &reply);
-        audio_io_handle_t output = (audio_io_handle_t) reply.readInt32();
-        ALOGV("openOutput() returned output, %d", output);
-        devices = (audio_devices_t)reply.readInt32();
-        if (pDevices != NULL) {
-            *pDevices = devices;
+        status = (status_t)reply.readInt32();
+        if (status != NO_ERROR) {
+            *output = AUDIO_IO_HANDLE_NONE;
+            return status;
         }
-        samplingRate = reply.readInt32();
-        if (pSamplingRate != NULL) {
-            *pSamplingRate = samplingRate;
-        }
-        format = (audio_format_t) reply.readInt32();
-        if (pFormat != NULL) {
-            *pFormat = format;
-        }
-        channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask != NULL) {
-            *pChannelMask = channelMask;
-        }
-        latency = reply.readInt32();
-        if (pLatencyMs != NULL) {
-            *pLatencyMs = latency;
-        }
-        return output;
+        *output = (audio_io_handle_t)reply.readInt32();
+        ALOGV("openOutput() returned output, %d", *output);
+        reply.read(config, sizeof(audio_config_t));
+        *devices = (audio_devices_t)reply.readInt32();
+        *latencyMs = reply.readInt32();
+        return NO_ERROR;
     }
 
     virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
@@ -528,44 +508,40 @@
         return reply.readInt32();
     }
 
-    virtual audio_io_handle_t openInput(audio_module_handle_t module,
-                                        audio_devices_t *pDevices,
-                                        uint32_t *pSamplingRate,
-                                        audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask)
+    virtual status_t openInput(audio_module_handle_t module,
+                               audio_io_handle_t *input,
+                               audio_config_t *config,
+                               audio_devices_t *device,
+                               const String8& address,
+                               audio_source_t source,
+                               audio_input_flags_t flags)
     {
+        if (input == NULL || config == NULL || device == NULL) {
+            return BAD_VALUE;
+        }
         Parcel data, reply;
-        audio_devices_t devices = pDevices != NULL ? *pDevices : AUDIO_DEVICE_NONE;
-        uint32_t samplingRate = pSamplingRate != NULL ? *pSamplingRate : 0;
-        audio_format_t format = pFormat != NULL ? *pFormat : AUDIO_FORMAT_DEFAULT;
-        audio_channel_mask_t channelMask = pChannelMask != NULL ?
-                *pChannelMask : (audio_channel_mask_t)0;
-
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(module);
-        data.writeInt32(devices);
-        data.writeInt32(samplingRate);
-        data.writeInt32(format);
-        data.writeInt32(channelMask);
-        remote()->transact(OPEN_INPUT, data, &reply);
-        audio_io_handle_t input = (audio_io_handle_t) reply.readInt32();
-        devices = (audio_devices_t)reply.readInt32();
-        if (pDevices != NULL) {
-            *pDevices = devices;
+        data.writeInt32(*input);
+        data.write(config, sizeof(audio_config_t));
+        data.writeInt32(*device);
+        data.writeString8(address);
+        data.writeInt32(source);
+        data.writeInt32(flags);
+        status_t status = remote()->transact(OPEN_INPUT, data, &reply);
+        if (status != NO_ERROR) {
+            *input = AUDIO_IO_HANDLE_NONE;
+            return status;
         }
-        samplingRate = reply.readInt32();
-        if (pSamplingRate != NULL) {
-            *pSamplingRate = samplingRate;
+        status = (status_t)reply.readInt32();
+        if (status != NO_ERROR) {
+            *input = AUDIO_IO_HANDLE_NONE;
+            return status;
         }
-        format = (audio_format_t) reply.readInt32();
-        if (pFormat != NULL) {
-            *pFormat = format;
-        }
-        channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask != NULL) {
-            *pChannelMask = channelMask;
-        }
-        return input;
+        *input = (audio_io_handle_t)reply.readInt32();
+        reply.read(config, sizeof(audio_config_t));
+        *device = (audio_devices_t)reply.readInt32();
+        return NO_ERROR;
     }
 
     virtual status_t closeInput(int input)
@@ -628,12 +604,12 @@
         return (uint32_t) reply.readInt32();
     }
 
-    virtual int newAudioSessionId()
+    virtual audio_unique_id_t newAudioUniqueId()
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         status_t status = remote()->transact(NEW_AUDIO_SESSION_ID, data, &reply);
-        int id = AUDIO_SESSION_ALLOCATE;
+        audio_unique_id_t id = AUDIO_SESSION_ALLOCATE;
         if (status == NO_ERROR) {
             id = reply.readInt32();
         }
@@ -964,7 +940,7 @@
             track_flags_t flags = (track_flags_t) data.readInt32();
             pid_t tid = (pid_t) data.readInt32();
             int sessionId = data.readInt32();
-            size_t notificationFrames = 0;
+            size_t notificationFrames = data.readInt64();
             sp<IMemory> cblk;
             sp<IMemory> buffers;
             status_t status;
@@ -1100,32 +1076,23 @@
         case OPEN_OUTPUT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             audio_module_handle_t module = (audio_module_handle_t)data.readInt32();
+            audio_config_t config;
+            data.read(&config, sizeof(audio_config_t));
             audio_devices_t devices = (audio_devices_t)data.readInt32();
-            uint32_t samplingRate = data.readInt32();
-            audio_format_t format = (audio_format_t) data.readInt32();
-            audio_channel_mask_t channelMask = (audio_channel_mask_t)data.readInt32();
-            uint32_t latency = data.readInt32();
+            String8 address(data.readString8());
             audio_output_flags_t flags = (audio_output_flags_t) data.readInt32();
-            bool hasOffloadInfo = data.readInt32() != 0;
-            audio_offload_info_t offloadInfo;
-            if (hasOffloadInfo) {
-                data.read(&offloadInfo, sizeof(audio_offload_info_t));
-            }
-            audio_io_handle_t output = openOutput(module,
-                                                 &devices,
-                                                 &samplingRate,
-                                                 &format,
-                                                 &channelMask,
-                                                 &latency,
-                                                 flags,
-                                                 hasOffloadInfo ? &offloadInfo : NULL);
+            uint32_t latencyMs;
+            audio_io_handle_t output;
+            status_t status = openOutput(module, &output, &config,
+                                         &devices, address, &latencyMs, flags);
             ALOGV("OPEN_OUTPUT output, %d", output);
-            reply->writeInt32((int32_t) output);
-            reply->writeInt32(devices);
-            reply->writeInt32(samplingRate);
-            reply->writeInt32(format);
-            reply->writeInt32(channelMask);
-            reply->writeInt32(latency);
+            reply->writeInt32((int32_t)status);
+            if (status == NO_ERROR) {
+                reply->writeInt32((int32_t)output);
+                reply->write(&config, sizeof(audio_config_t));
+                reply->writeInt32(devices);
+                reply->writeInt32(latencyMs);
+            }
             return NO_ERROR;
         } break;
         case OPEN_DUPLICATE_OUTPUT: {
@@ -1153,21 +1120,22 @@
         case OPEN_INPUT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             audio_module_handle_t module = (audio_module_handle_t)data.readInt32();
-            audio_devices_t devices = (audio_devices_t)data.readInt32();
-            uint32_t samplingRate = data.readInt32();
-            audio_format_t format = (audio_format_t) data.readInt32();
-            audio_channel_mask_t channelMask = (audio_channel_mask_t)data.readInt32();
+            audio_io_handle_t input = (audio_io_handle_t)data.readInt32();
+            audio_config_t config;
+            data.read(&config, sizeof(audio_config_t));
+            audio_devices_t device = (audio_devices_t)data.readInt32();
+            String8 address(data.readString8());
+            audio_source_t source = (audio_source_t)data.readInt32();
+            audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
 
-            audio_io_handle_t input = openInput(module,
-                                             &devices,
-                                             &samplingRate,
-                                             &format,
-                                             &channelMask);
-            reply->writeInt32((int32_t) input);
-            reply->writeInt32(devices);
-            reply->writeInt32(samplingRate);
-            reply->writeInt32(format);
-            reply->writeInt32(channelMask);
+            status_t status = openInput(module, &input, &config,
+                                        &device, address, source, flags);
+            reply->writeInt32((int32_t) status);
+            if (status == NO_ERROR) {
+                reply->writeInt32((int32_t) input);
+                reply->write(&config, sizeof(audio_config_t));
+                reply->writeInt32(device);
+            }
             return NO_ERROR;
         } break;
         case CLOSE_INPUT: {
@@ -1208,7 +1176,7 @@
         } break;
         case NEW_AUDIO_SESSION_ID: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(newAudioSessionId());
+            reply->writeInt32(newAudioUniqueId());
             return NO_ERROR;
         } break;
         case ACQUIRE_AUDIO_SESSION_ID: {
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 41a9065..1593b17 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -225,7 +225,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int audioSession)
+                                    int audioSession,
+                                    audio_input_flags_t flags)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -234,33 +235,40 @@
         data.writeInt32(static_cast <uint32_t>(format));
         data.writeInt32(channelMask);
         data.writeInt32(audioSession);
+        data.writeInt32(flags);
         remote()->transact(GET_INPUT, data, &reply);
         return static_cast <audio_io_handle_t> (reply.readInt32());
     }
 
-    virtual status_t startInput(audio_io_handle_t input)
+    virtual status_t startInput(audio_io_handle_t input,
+                                audio_session_t session)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(input);
+        data.writeInt32(session);
         remote()->transact(START_INPUT, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
 
-    virtual status_t stopInput(audio_io_handle_t input)
+    virtual status_t stopInput(audio_io_handle_t input,
+                               audio_session_t session)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(input);
+        data.writeInt32(session);
         remote()->transact(STOP_INPUT, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
 
-    virtual void releaseInput(audio_io_handle_t input)
+    virtual void releaseInput(audio_io_handle_t input,
+                              audio_session_t session)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(input);
+        data.writeInt32(session);
         remote()->transact(RELEASE_INPUT, data, &reply);
     }
 
@@ -707,11 +715,13 @@
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
             int audioSession = data.readInt32();
+            audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
             audio_io_handle_t input = getInput(inputSource,
                                                samplingRate,
                                                format,
                                                channelMask,
-                                               audioSession);
+                                               audioSession,
+                                               flags);
             reply->writeInt32(static_cast <int>(input));
             return NO_ERROR;
         } break;
@@ -719,21 +729,24 @@
         case START_INPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_io_handle_t input = static_cast <audio_io_handle_t>(data.readInt32());
-            reply->writeInt32(static_cast <uint32_t>(startInput(input)));
+            audio_session_t session = static_cast <audio_session_t>(data.readInt32());
+            reply->writeInt32(static_cast <uint32_t>(startInput(input, session)));
             return NO_ERROR;
         } break;
 
         case STOP_INPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_io_handle_t input = static_cast <audio_io_handle_t>(data.readInt32());
-            reply->writeInt32(static_cast <uint32_t>(stopInput(input)));
+            audio_session_t session = static_cast <audio_session_t>(data.readInt32());
+            reply->writeInt32(static_cast <uint32_t>(stopInput(input, session)));
             return NO_ERROR;
         } break;
 
         case RELEASE_INPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_io_handle_t input = static_cast <audio_io_handle_t>(data.readInt32());
-            releaseInput(input);
+            audio_session_t session = static_cast <audio_session_t>(data.readInt32());
+            releaseInput(input, session);
             return NO_ERROR;
         } break;
 
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index e9e453b..d2e181b 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -81,6 +81,12 @@
     {"timelapse1080p", CAMCORDER_QUALITY_TIME_LAPSE_1080P},
     {"timelapse2160p", CAMCORDER_QUALITY_TIME_LAPSE_2160P},
     {"timelapseqvga", CAMCORDER_QUALITY_TIME_LAPSE_QVGA},
+
+    {"highspeedlow",  CAMCORDER_QUALITY_HIGH_SPEED_LOW},
+    {"highspeedhigh", CAMCORDER_QUALITY_HIGH_SPEED_HIGH},
+    {"highspeed480p", CAMCORDER_QUALITY_HIGH_SPEED_480P},
+    {"highspeed720p", CAMCORDER_QUALITY_HIGH_SPEED_720P},
+    {"highspeed1080p", CAMCORDER_QUALITY_HIGH_SPEED_1080P},
 };
 
 #if LOG_NDEBUG
@@ -474,6 +480,11 @@
            quality <= CAMCORDER_QUALITY_TIME_LAPSE_LIST_END;
 }
 
+static bool isHighSpeedProfile(camcorder_quality quality) {
+    return quality >= CAMCORDER_QUALITY_HIGH_SPEED_LIST_START &&
+           quality <= CAMCORDER_QUALITY_HIGH_SPEED_LIST_END;
+}
+
 void MediaProfiles::initRequiredProfileRefs(const Vector<int>& cameraIds) {
     ALOGV("Number of camera ids: %zu", cameraIds.size());
     CHECK(cameraIds.size() > 0);
@@ -521,14 +532,17 @@
         camcorder_quality refQuality;
         VideoCodec *codec = NULL;
 
-        // Check high and low from either camcorder profile or timelapse profile
-        // but not both. Default, check camcorder profile
+        // Check high and low from either camcorder profile, timelapse profile
+        // or high speed profile, but not all of them. Default, check camcorder profile
         size_t j = 0;
         size_t o = 2;
         if (isTimelapseProfile(quality)) {
             // Check timelapse profile instead.
             j = 2;
             o = kNumRequiredProfiles;
+        } else if (isHighSpeedProfile(quality)) {
+            // Skip the check for high speed profile.
+            continue;
         } else {
             // Must be camcorder profile.
             CHECK(isCamcorderProfile(quality));
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index 2aa0592..d2e381b 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -28,6 +28,7 @@
 #include <media/mediaplayer.h>
 #include <media/SoundPool.h>
 #include "SoundPoolThread.h"
+#include <media/AudioPolicyHelper.h>
 
 namespace android
 {
@@ -39,10 +40,10 @@
 size_t kDefaultHeapSize = 1024 * 1024; // 1MB
 
 
-SoundPool::SoundPool(int maxChannels, audio_stream_type_t streamType, int srcQuality)
+SoundPool::SoundPool(int maxChannels, const audio_attributes_t* pAttributes)
 {
-    ALOGV("SoundPool constructor: maxChannels=%d, streamType=%d, srcQuality=%d",
-            maxChannels, streamType, srcQuality);
+    ALOGV("SoundPool constructor: maxChannels=%d, attr.usage=%d, attr.flags=0x%x, attr.tags=%s",
+            maxChannels, pAttributes->usage, pAttributes->flags, pAttributes->tags);
 
     // check limits
     mMaxChannels = maxChannels;
@@ -56,8 +57,7 @@
 
     mQuit = false;
     mDecodeThread = 0;
-    mStreamType = streamType;
-    mSrcQuality = srcQuality;
+    memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
     mAllocated = 0;
     mNextSampleID = 0;
     mNextChannelID = 0;
@@ -580,7 +580,7 @@
         // initialize track
         size_t afFrameCount;
         uint32_t afSampleRate;
-        audio_stream_type_t streamType = mSoundPool->streamType();
+        audio_stream_type_t streamType = audio_attributes_to_stream_type(mSoundPool->attributes());
         if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
             afFrameCount = kDefaultFrameCount;
         }
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 889bd7f..6cd377a 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -59,7 +59,7 @@
     mLeftVolume = mRightVolume = 1.0;
     mVideoWidth = mVideoHeight = 0;
     mLockThreadId = 0;
-    mAudioSessionId = AudioSystem::newAudioSessionId();
+    mAudioSessionId = AudioSystem::newAudioUniqueId();
     AudioSystem::acquireAudioSessionId(mAudioSessionId, -1);
     mSendLevel = 0;
     mRetransmitEndpointValid = false;
@@ -283,16 +283,21 @@
 status_t MediaPlayer::start()
 {
     ALOGV("start");
+
+    status_t ret = NO_ERROR;
     Mutex::Autolock _l(mLock);
-    if (mCurrentState & MEDIA_PLAYER_STARTED)
-        return NO_ERROR;
-    if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_PREPARED |
+
+    mLockThreadId = getThreadId();
+
+    if (mCurrentState & MEDIA_PLAYER_STARTED) {
+        ret = NO_ERROR;
+    } else if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_PREPARED |
                     MEDIA_PLAYER_PLAYBACK_COMPLETE | MEDIA_PLAYER_PAUSED ) ) ) {
         mPlayer->setLooping(mLoop);
         mPlayer->setVolume(mLeftVolume, mRightVolume);
         mPlayer->setAuxEffectSendLevel(mSendLevel);
         mCurrentState = MEDIA_PLAYER_STARTED;
-        status_t ret = mPlayer->start();
+        ret = mPlayer->start();
         if (ret != NO_ERROR) {
             mCurrentState = MEDIA_PLAYER_STATE_ERROR;
         } else {
@@ -300,10 +305,14 @@
                 ALOGV("playback completed immediately following start()");
             }
         }
-        return ret;
+    } else {
+        ALOGE("start called in state %d", mCurrentState);
+        ret = INVALID_OPERATION;
     }
-    ALOGE("start called in state %d", mCurrentState);
-    return INVALID_OPERATION;
+
+    mLockThreadId = 0;
+
+    return ret;
 }
 
 status_t MediaPlayer::stop()
@@ -706,8 +715,8 @@
     // running in the same process as the media server. In that case,
     // this will deadlock.
     //
-    // The threadId hack below works around this for the care of prepare
-    // and seekTo within the same process.
+    // The threadId hack below works around this for the care of prepare,
+    // seekTo and start within the same process.
     // FIXME: Remember, this is a hack, it's not even a hack that is applied
     // consistently for all use-cases, this needs to be revisited.
     if (mLockThreadId != getThreadId()) {
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index c8192e9..1952b86 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -186,8 +186,11 @@
         ALOGE("setOutputFormat called in an invalid state: %d", mCurrentState);
         return INVALID_OPERATION;
     }
-    if (mIsVideoSourceSet && of >= OUTPUT_FORMAT_AUDIO_ONLY_START && of != OUTPUT_FORMAT_RTP_AVP && of != OUTPUT_FORMAT_MPEG2TS) { //first non-video output format
-        ALOGE("output format (%d) is meant for audio recording only and incompatible with video recording", of);
+    if (mIsVideoSourceSet
+            && of >= OUTPUT_FORMAT_AUDIO_ONLY_START //first non-video output format
+            && of < OUTPUT_FORMAT_AUDIO_ONLY_END) {
+        ALOGE("output format (%d) is meant for audio recording only"
+              " and incompatible with video recording", of);
         return INVALID_OPERATION;
     }
 
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 48d44c1..0c7e590c 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -49,6 +49,7 @@
     $(TOP)/frameworks/av/media/libstagefright/include               \
     $(TOP)/frameworks/av/media/libstagefright/rtsp                  \
     $(TOP)/frameworks/av/media/libstagefright/wifi-display          \
+    $(TOP)/frameworks/av/media/libstagefright/webm                  \
     $(TOP)/frameworks/native/include/media/openmax                  \
     $(TOP)/external/tremolo/Tremolo                                 \
 
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index e9c5e8e..dacb144 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -62,18 +62,18 @@
 
 player_type MediaPlayerFactory::getDefaultPlayerType() {
     char value[PROPERTY_VALUE_MAX];
-    if (property_get("media.stagefright.use-nuplayer", value, NULL)
+    if (property_get("media.stagefright.use-awesome", value, NULL)
             && (!strcmp("1", value) || !strcasecmp("true", value))) {
-        return NU_PLAYER;
+        return STAGEFRIGHT_PLAYER;
     }
 
     // TODO: remove this EXPERIMENTAL developer settings property
-    if (property_get("persist.sys.media.use-nuplayer", value, NULL)
+    if (property_get("persist.sys.media.use-awesome", value, NULL)
             && !strcasecmp("true", value)) {
-        return NU_PLAYER;
+        return STAGEFRIGHT_PLAYER;
     }
 
-    return STAGEFRIGHT_PLAYER;
+    return NU_PLAYER;
 }
 
 status_t MediaPlayerFactory::registerFactory(IFactory* factory,
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 7218467..735344c 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -34,6 +34,7 @@
 
 #include <utils/misc.h>
 
+#include <binder/IBatteryStats.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/MemoryHeapBase.h>
@@ -275,6 +276,20 @@
     // speaker is on by default
     mBatteryAudio.deviceOn[SPEAKER] = 1;
 
+    // reset battery stats
+    // if the mediaserver has crashed, battery stats could be left
+    // in bad state, reset the state upon service start.
+    const sp<IServiceManager> sm(defaultServiceManager());
+    if (sm != NULL) {
+        const String16 name("batterystats");
+        sp<IBatteryStats> batteryStats =
+                interface_cast<IBatteryStats>(sm->getService(name));
+        if (batteryStats != NULL) {
+            batteryStats->noteResetVideo();
+            batteryStats->noteResetAudio();
+        }
+    }
+
     MediaPlayerFactory::registerBuiltinFactories();
 }
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index bfc075c..8774117 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -19,6 +19,7 @@
 #include <inttypes.h>
 #include <utils/Log.h>
 
+#include "WebmWriter.h"
 #include "StagefrightRecorder.h"
 
 #include <binder/IPCThreadState.h>
@@ -764,7 +765,8 @@
         case OUTPUT_FORMAT_DEFAULT:
         case OUTPUT_FORMAT_THREE_GPP:
         case OUTPUT_FORMAT_MPEG_4:
-            status = setupMPEG4Recording();
+        case OUTPUT_FORMAT_WEBM:
+            status = setupMPEG4orWEBMRecording();
             break;
 
         case OUTPUT_FORMAT_AMR_NB:
@@ -826,9 +828,14 @@
         case OUTPUT_FORMAT_DEFAULT:
         case OUTPUT_FORMAT_THREE_GPP:
         case OUTPUT_FORMAT_MPEG_4:
+        case OUTPUT_FORMAT_WEBM:
         {
+            bool isMPEG4 = true;
+            if (mOutputFormat == OUTPUT_FORMAT_WEBM) {
+                isMPEG4 = false;
+            }
             sp<MetaData> meta = new MetaData;
-            setupMPEG4MetaData(&meta);
+            setupMPEG4orWEBMMetaData(&meta);
             status = mWriter->start(meta.get());
             break;
         }
@@ -1538,12 +1545,17 @@
     return OK;
 }
 
-status_t StagefrightRecorder::setupMPEG4Recording() {
+status_t StagefrightRecorder::setupMPEG4orWEBMRecording() {
     mWriter.clear();
     mTotalBitRate = 0;
 
     status_t err = OK;
-    sp<MediaWriter> writer = new MPEG4Writer(mOutputFd);
+    sp<MediaWriter> writer;
+    if (mOutputFormat == OUTPUT_FORMAT_WEBM) {
+        writer = new WebmWriter(mOutputFd);
+    } else {
+        writer = new MPEG4Writer(mOutputFd);
+    }
 
     if (mVideoSource < VIDEO_SOURCE_LIST_END) {
 
@@ -1563,22 +1575,25 @@
         mTotalBitRate += mVideoBitRate;
     }
 
-    // Audio source is added at the end if it exists.
-    // This help make sure that the "recoding" sound is suppressed for
-    // camcorder applications in the recorded files.
-    if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
-        err = setupAudioEncoder(writer);
-        if (err != OK) return err;
-        mTotalBitRate += mAudioBitRate;
-    }
+    if (mOutputFormat != OUTPUT_FORMAT_WEBM) {
+        // Audio source is added at the end if it exists.
+        // This helps make sure that the "recording" sound is suppressed for
+        // camcorder applications in the recorded files.
+        // TODO Audio source is currently unsupported for webm output; vorbis encoder needed.
+        if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
+            err = setupAudioEncoder(writer);
+            if (err != OK) return err;
+            mTotalBitRate += mAudioBitRate;
+        }
 
-    if (mInterleaveDurationUs > 0) {
-        reinterpret_cast<MPEG4Writer *>(writer.get())->
-            setInterleaveDuration(mInterleaveDurationUs);
-    }
-    if (mLongitudex10000 > -3600000 && mLatitudex10000 > -3600000) {
-        reinterpret_cast<MPEG4Writer *>(writer.get())->
-            setGeoData(mLatitudex10000, mLongitudex10000);
+        if (mInterleaveDurationUs > 0) {
+            reinterpret_cast<MPEG4Writer *>(writer.get())->
+                setInterleaveDuration(mInterleaveDurationUs);
+        }
+        if (mLongitudex10000 > -3600000 && mLatitudex10000 > -3600000) {
+            reinterpret_cast<MPEG4Writer *>(writer.get())->
+                setGeoData(mLatitudex10000, mLongitudex10000);
+        }
     }
     if (mMaxFileDurationUs != 0) {
         writer->setMaxFileDuration(mMaxFileDurationUs);
@@ -1586,7 +1601,6 @@
     if (mMaxFileSizeBytes != 0) {
         writer->setMaxFileSize(mMaxFileSizeBytes);
     }
-
     if (mVideoSource == VIDEO_SOURCE_DEFAULT
             || mVideoSource == VIDEO_SOURCE_CAMERA) {
         mStartTimeOffsetMs = mEncoderProfiles->getStartTimeOffsetMs(mCameraId);
@@ -1595,8 +1609,7 @@
         mStartTimeOffsetMs = 200;
     }
     if (mStartTimeOffsetMs > 0) {
-        reinterpret_cast<MPEG4Writer *>(writer.get())->
-            setStartTimeOffsetMs(mStartTimeOffsetMs);
+        writer->setStartTimeOffsetMs(mStartTimeOffsetMs);
     }
 
     writer->setListener(mListener);
@@ -1604,20 +1617,22 @@
     return OK;
 }
 
-void StagefrightRecorder::setupMPEG4MetaData(sp<MetaData> *meta) {
+void StagefrightRecorder::setupMPEG4orWEBMMetaData(sp<MetaData> *meta) {
     int64_t startTimeUs = systemTime() / 1000;
     (*meta)->setInt64(kKeyTime, startTimeUs);
     (*meta)->setInt32(kKeyFileType, mOutputFormat);
     (*meta)->setInt32(kKeyBitRate, mTotalBitRate);
-    (*meta)->setInt32(kKey64BitFileOffset, mUse64BitFileOffset);
     if (mMovieTimeScale > 0) {
         (*meta)->setInt32(kKeyTimeScale, mMovieTimeScale);
     }
-    if (mTrackEveryTimeDurationUs > 0) {
-        (*meta)->setInt64(kKeyTrackTimeStatus, mTrackEveryTimeDurationUs);
-    }
-    if (mRotationDegrees != 0) {
-        (*meta)->setInt32(kKeyRotation, mRotationDegrees);
+    if (mOutputFormat != OUTPUT_FORMAT_WEBM) {
+        (*meta)->setInt32(kKey64BitFileOffset, mUse64BitFileOffset);
+        if (mTrackEveryTimeDurationUs > 0) {
+            (*meta)->setInt64(kKeyTrackTimeStatus, mTrackEveryTimeDurationUs);
+        }
+        if (mRotationDegrees != 0) {
+            (*meta)->setInt32(kKeyRotation, mRotationDegrees);
+        }
     }
 }
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 377d168..9062f30 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -128,8 +128,8 @@
     sp<ALooper> mLooper;
 
     status_t prepareInternal();
-    status_t setupMPEG4Recording();
-    void setupMPEG4MetaData(sp<MetaData> *meta);
+    status_t setupMPEG4orWEBMRecording();
+    void setupMPEG4orWEBMMetaData(sp<MetaData> *meta);
     status_t setupAMRRecording();
     status_t setupAACRecording();
     status_t setupRawAudioRecording();
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index 25002e3..0dd2b61 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -18,6 +18,7 @@
 	$(TOP)/frameworks/av/media/libstagefright/include             \
 	$(TOP)/frameworks/av/media/libstagefright/mpeg2ts             \
 	$(TOP)/frameworks/av/media/libstagefright/rtsp                \
+	$(TOP)/frameworks/av/media/libstagefright/timedtext           \
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_MODULE:= libstagefright_nuplayer
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index cc0cb01..a3e84df 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -28,6 +28,7 @@
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
+#include "../../libstagefright/include/WVMExtractor.h"
 
 namespace android {
 
@@ -35,10 +36,18 @@
         const sp<AMessage> &notify,
         const sp<IMediaHTTPService> &httpService,
         const char *url,
-        const KeyedVector<String8, String8> *headers)
+        const KeyedVector<String8, String8> *headers,
+        bool isWidevine,
+        bool uidValid,
+        uid_t uid)
     : Source(notify),
+      mFetchSubtitleDataGeneration(0),
+      mFetchTimedTextDataGeneration(0),
       mDurationUs(0ll),
-      mAudioIsVorbis(false) {
+      mAudioIsVorbis(false),
+      mIsWidevine(isWidevine),
+      mUIDValid(uidValid),
+      mUID(uid) {
     DataSource::RegisterDefaultSniffers();
 
     sp<DataSource> dataSource =
@@ -52,8 +61,11 @@
         const sp<AMessage> &notify,
         int fd, int64_t offset, int64_t length)
     : Source(notify),
+      mFetchSubtitleDataGeneration(0),
+      mFetchTimedTextDataGeneration(0),
       mDurationUs(0ll),
-      mAudioIsVorbis(false) {
+      mAudioIsVorbis(false),
+      mIsWidevine(false) {
     DataSource::RegisterDefaultSniffers();
 
     sp<DataSource> dataSource = new FileSource(dup(fd), offset, length);
@@ -63,7 +75,31 @@
 
 void NuPlayer::GenericSource::initFromDataSource(
         const sp<DataSource> &dataSource) {
-    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
+    sp<MediaExtractor> extractor;
+
+    if (mIsWidevine) {
+        String8 mimeType;
+        float confidence;
+        sp<AMessage> dummy;
+        bool success;
+
+        success = SniffWVM(dataSource, &mimeType, &confidence, &dummy);
+        if (!success
+                || strcasecmp(
+                    mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
+            ALOGE("unsupported widevine mime: %s", mimeType.string());
+            return;
+        }
+
+        sp<WVMExtractor> wvmExtractor = new WVMExtractor(dataSource);
+        wvmExtractor->setAdaptiveStreamingMode(true);
+        if (mUIDValid) {
+            wvmExtractor->setUID(mUID);
+        }
+        extractor = wvmExtractor;
+    } else {
+        extractor = MediaExtractor::Create(dataSource);
+    }
 
     CHECK(extractor != NULL);
 
@@ -113,6 +149,13 @@
     }
 }
 
+status_t NuPlayer::GenericSource::setBuffers(bool audio, Vector<MediaBuffer *> &buffers) {
+    if (mIsWidevine && !audio) {
+        return mVideoTrack.mSource->setBuffers(buffers);
+    }
+    return INVALID_OPERATION;
+}
+
 NuPlayer::GenericSource::~GenericSource() {
 }
 
@@ -128,7 +171,8 @@
     }
 
     notifyFlagsChanged(
-            FLAG_CAN_PAUSE
+            (mIsWidevine ? FLAG_SECURE : 0)
+            | FLAG_CAN_PAUSE
             | FLAG_CAN_SEEK_BACKWARD
             | FLAG_CAN_SEEK_FORWARD
             | FLAG_CAN_SEEK);
@@ -141,20 +185,18 @@
 
     if (mAudioTrack.mSource != NULL) {
         CHECK_EQ(mAudioTrack.mSource->start(), (status_t)OK);
-
         mAudioTrack.mPackets =
             new AnotherPacketSource(mAudioTrack.mSource->getFormat());
 
-        readBuffer(true /* audio */);
+        readBuffer(MEDIA_TRACK_TYPE_AUDIO);
     }
 
     if (mVideoTrack.mSource != NULL) {
         CHECK_EQ(mVideoTrack.mSource->start(), (status_t)OK);
-
         mVideoTrack.mPackets =
             new AnotherPacketSource(mVideoTrack.mSource->getFormat());
 
-        readBuffer(false /* audio */);
+        readBuffer(MEDIA_TRACK_TYPE_VIDEO);
     }
 }
 
@@ -162,6 +204,158 @@
     return OK;
 }
 
+void NuPlayer::GenericSource::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+      case kWhatFetchSubtitleData:
+      {
+          fetchTextData(kWhatSendSubtitleData, MEDIA_TRACK_TYPE_SUBTITLE,
+                  mFetchSubtitleDataGeneration, mSubtitleTrack.mPackets, msg);
+          break;
+      }
+
+      case kWhatFetchTimedTextData:
+      {
+          fetchTextData(kWhatSendTimedTextData, MEDIA_TRACK_TYPE_TIMEDTEXT,
+                  mFetchTimedTextDataGeneration, mTimedTextTrack.mPackets, msg);
+          break;
+      }
+
+      case kWhatSendSubtitleData:
+      {
+          sendTextData(kWhatSubtitleData, MEDIA_TRACK_TYPE_SUBTITLE,
+                  mFetchSubtitleDataGeneration, mSubtitleTrack.mPackets, msg);
+          break;
+      }
+
+      case kWhatSendTimedTextData:
+      {
+          sendTextData(kWhatTimedTextData, MEDIA_TRACK_TYPE_TIMEDTEXT,
+                  mFetchTimedTextDataGeneration, mTimedTextTrack.mPackets, msg);
+          break;
+      }
+
+      case kWhatChangeAVSource:
+      {
+          int32_t trackIndex;
+          CHECK(msg->findInt32("trackIndex", &trackIndex));
+          const sp<MediaSource> source = mSources.itemAt(trackIndex);
+
+          Track* track;
+          const char *mime;
+          media_track_type trackType, counterpartType;
+          sp<MetaData> meta = source->getFormat();
+          meta->findCString(kKeyMIMEType, &mime);
+          if (!strncasecmp(mime, "audio/", 6)) {
+              track = &mAudioTrack;
+              trackType = MEDIA_TRACK_TYPE_AUDIO;
+              counterpartType = MEDIA_TRACK_TYPE_VIDEO;
+          } else {
+              CHECK(!strncasecmp(mime, "video/", 6));
+              track = &mVideoTrack;
+              trackType = MEDIA_TRACK_TYPE_VIDEO;
+              counterpartType = MEDIA_TRACK_TYPE_AUDIO;
+          }
+
+
+          if (track->mSource != NULL) {
+              track->mSource->stop();
+          }
+          track->mSource = source;
+          track->mSource->start();
+          track->mIndex = trackIndex;
+
+          status_t avail;
+          if (!track->mPackets->hasBufferAvailable(&avail)) {
+              // sync from other source
+              TRESPASS();
+              break;
+          }
+
+          int64_t timeUs, actualTimeUs;
+          const bool formatChange = true;
+          sp<AMessage> latestMeta = track->mPackets->getLatestMeta();
+          CHECK(latestMeta != NULL && latestMeta->findInt64("timeUs", &timeUs));
+          readBuffer(trackType, timeUs, &actualTimeUs, formatChange);
+          readBuffer(counterpartType, -1, NULL, formatChange);
+          ALOGV("timeUs %lld actualTimeUs %lld", (long long)timeUs, (long long)actualTimeUs);
+
+          break;
+      }
+
+      default:
+          Source::onMessageReceived(msg);
+          break;
+    }
+}
+
+void NuPlayer::GenericSource::fetchTextData(
+        uint32_t sendWhat,
+        media_track_type type,
+        int32_t curGen,
+        sp<AnotherPacketSource> packets,
+        sp<AMessage> msg) {
+    int32_t msgGeneration;
+    CHECK(msg->findInt32("generation", &msgGeneration));
+    if (msgGeneration != curGen) {
+        // stale
+        return;
+    }
+
+    int32_t avail;
+    if (packets->hasBufferAvailable(&avail)) {
+        return;
+    }
+
+    int64_t timeUs;
+    CHECK(msg->findInt64("timeUs", &timeUs));
+
+    int64_t subTimeUs;
+    readBuffer(type, timeUs, &subTimeUs);
+
+    int64_t delayUs = subTimeUs - timeUs;
+    if (msg->what() == kWhatFetchSubtitleData) {
+        const int64_t oneSecUs = 1000000ll;
+        delayUs -= oneSecUs;
+    }
+    sp<AMessage> msg2 = new AMessage(sendWhat, id());
+    msg2->setInt32("generation", msgGeneration);
+    msg2->post(delayUs < 0 ? 0 : delayUs);
+}
+
+void NuPlayer::GenericSource::sendTextData(
+        uint32_t what,
+        media_track_type type,
+        int32_t curGen,
+        sp<AnotherPacketSource> packets,
+        sp<AMessage> msg) {
+    int32_t msgGeneration;
+    CHECK(msg->findInt32("generation", &msgGeneration));
+    if (msgGeneration != curGen) {
+        // stale
+        return;
+    }
+
+    int64_t subTimeUs;
+    if (packets->nextBufferTime(&subTimeUs) != OK) {
+        return;
+    }
+
+    int64_t nextSubTimeUs;
+    readBuffer(type, -1, &nextSubTimeUs);
+
+    sp<ABuffer> buffer;
+    status_t dequeueStatus = packets->dequeueAccessUnit(&buffer);
+    if (dequeueStatus == OK) {
+        sp<AMessage> notify = dupNotify();
+        notify->setInt32("what", what);
+        notify->setBuffer("buffer", buffer);
+        notify->post();
+
+        const int64_t delayUs = nextSubTimeUs - subTimeUs;
+        msg->post(delayUs < 0 ? 0 : delayUs);
+    }
+}
+
 sp<MetaData> NuPlayer::GenericSource::getFormatMeta(bool audio) {
     sp<MediaSource> source = audio ? mAudioTrack.mSource : mVideoTrack.mSource;
 
@@ -180,14 +374,64 @@
         return -EWOULDBLOCK;
     }
 
+    if (mIsWidevine && !audio) {
+        // try to read a buffer as we may not have been able to the last time
+        readBuffer(MEDIA_TRACK_TYPE_VIDEO, -1ll);
+    }
+
     status_t finalResult;
     if (!track->mPackets->hasBufferAvailable(&finalResult)) {
-        return finalResult == OK ? -EWOULDBLOCK : finalResult;
+        return (finalResult == OK ? -EWOULDBLOCK : finalResult);
     }
 
     status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
 
-    readBuffer(audio, -1ll);
+    if (!track->mPackets->hasBufferAvailable(&finalResult)) {
+        readBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO, -1ll);
+    }
+
+    if (mSubtitleTrack.mSource == NULL && mTimedTextTrack.mSource == NULL) {
+        return result;
+    }
+
+    if (mSubtitleTrack.mSource != NULL) {
+        CHECK(mSubtitleTrack.mPackets != NULL);
+    }
+    if (mTimedTextTrack.mSource != NULL) {
+        CHECK(mTimedTextTrack.mPackets != NULL);
+    }
+
+    if (result != OK) {
+        if (mSubtitleTrack.mSource != NULL) {
+            mSubtitleTrack.mPackets->clear();
+            mFetchSubtitleDataGeneration++;
+        }
+        if (mTimedTextTrack.mSource != NULL) {
+            mTimedTextTrack.mPackets->clear();
+            mFetchTimedTextDataGeneration++;
+        }
+        return result;
+    }
+
+    int64_t timeUs;
+    status_t eosResult; // ignored
+    CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
+
+    if (mSubtitleTrack.mSource != NULL
+            && !mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
+        sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, id());
+        msg->setInt64("timeUs", timeUs);
+        msg->setInt32("generation", mFetchSubtitleDataGeneration);
+        msg->post();
+    }
+
+    if (mTimedTextTrack.mSource != NULL
+            && !mTimedTextTrack.mPackets->hasBufferAvailable(&eosResult)) {
+        sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, id());
+        msg->setInt64("timeUs", timeUs);
+        msg->setInt32("generation", mFetchTimedTextDataGeneration);
+        msg->post();
+    }
 
     return result;
 }
@@ -247,25 +491,207 @@
     return format;
 }
 
+ssize_t NuPlayer::GenericSource::getSelectedTrack(media_track_type type) const {
+    const Track *track = NULL;
+    switch (type) {
+    case MEDIA_TRACK_TYPE_VIDEO:
+        track = &mVideoTrack;
+        break;
+    case MEDIA_TRACK_TYPE_AUDIO:
+        track = &mAudioTrack;
+        break;
+    case MEDIA_TRACK_TYPE_TIMEDTEXT:
+        track = &mTimedTextTrack;
+        break;
+    case MEDIA_TRACK_TYPE_SUBTITLE:
+        track = &mSubtitleTrack;
+        break;
+    default:
+        break;
+    }
+
+    if (track != NULL && track->mSource != NULL) {
+        return track->mIndex;
+    }
+
+    return -1;
+}
+
+status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select) {
+    ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
+    if (trackIndex >= mSources.size()) {
+        return BAD_INDEX;
+    }
+
+    if (!select) {
+        Track* track = NULL;
+        if (mSubtitleTrack.mSource != NULL && trackIndex == mSubtitleTrack.mIndex) {
+            track = &mSubtitleTrack;
+            mFetchSubtitleDataGeneration++;
+        } else if (mTimedTextTrack.mSource != NULL && trackIndex == mTimedTextTrack.mIndex) {
+            track = &mTimedTextTrack;
+            mFetchTimedTextDataGeneration++;
+        }
+        if (track == NULL) {
+            return INVALID_OPERATION;
+        }
+        track->mSource->stop();
+        track->mSource = NULL;
+        track->mPackets->clear();
+        return OK;
+    }
+
+    const sp<MediaSource> source = mSources.itemAt(trackIndex);
+    sp<MetaData> meta = source->getFormat();
+    const char *mime;
+    CHECK(meta->findCString(kKeyMIMEType, &mime));
+    if (!strncasecmp(mime, "text/", 5)) {
+        bool isSubtitle = (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) != 0);
+        Track *track = isSubtitle ? &mSubtitleTrack : &mTimedTextTrack;
+        if (track->mSource != NULL && track->mIndex == trackIndex) {
+            return OK;
+        }
+        track->mIndex = trackIndex;
+        if (track->mSource != NULL) {
+            track->mSource->stop();
+        }
+        track->mSource = mSources.itemAt(trackIndex);
+        track->mSource->start();
+        if (track->mPackets == NULL) {
+            track->mPackets = new AnotherPacketSource(track->mSource->getFormat());
+        } else {
+            track->mPackets->clear();
+            track->mPackets->setFormat(track->mSource->getFormat());
+
+        }
+
+        if (isSubtitle) {
+            mFetchSubtitleDataGeneration++;
+        } else {
+            mFetchTimedTextDataGeneration++;
+        }
+
+        return OK;
+    } else if (!strncasecmp(mime, "audio/", 6) || !strncasecmp(mime, "video/", 6)) {
+        bool audio = !strncasecmp(mime, "audio/", 6);
+        Track *track = audio ? &mAudioTrack : &mVideoTrack;
+        if (track->mSource != NULL && track->mIndex == trackIndex) {
+            return OK;
+        }
+
+        sp<AMessage> msg = new AMessage(kWhatChangeAVSource, id());
+        msg->setInt32("trackIndex", trackIndex);
+        msg->post();
+        return OK;
+    }
+
+    return INVALID_OPERATION;
+}
+
 status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
     if (mVideoTrack.mSource != NULL) {
         int64_t actualTimeUs;
-        readBuffer(false /* audio */, seekTimeUs, &actualTimeUs);
+        readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, &actualTimeUs);
 
         seekTimeUs = actualTimeUs;
     }
 
     if (mAudioTrack.mSource != NULL) {
-        readBuffer(true /* audio */, seekTimeUs);
+        readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs);
     }
 
     return OK;
 }
 
+sp<ABuffer> NuPlayer::GenericSource::mediaBufferToABuffer(
+        MediaBuffer* mb,
+        media_track_type trackType,
+        int64_t *actualTimeUs) {
+    bool audio = trackType == MEDIA_TRACK_TYPE_AUDIO;
+    size_t outLength = mb->range_length();
+
+    if (audio && mAudioIsVorbis) {
+        outLength += sizeof(int32_t);
+    }
+
+    sp<ABuffer> ab;
+    if (mIsWidevine && !audio) {
+        // data is already provided in the buffer
+        ab = new ABuffer(NULL, mb->range_length());
+        ab->meta()->setPointer("mediaBuffer", mb);
+        mb->add_ref();
+    } else {
+        ab = new ABuffer(outLength);
+        memcpy(ab->data(),
+               (const uint8_t *)mb->data() + mb->range_offset(),
+               mb->range_length());
+    }
+
+    if (audio && mAudioIsVorbis) {
+        int32_t numPageSamples;
+        if (!mb->meta_data()->findInt32(kKeyValidSamples, &numPageSamples)) {
+            numPageSamples = -1;
+        }
+
+        uint8_t* abEnd = ab->data() + mb->range_length();
+        memcpy(abEnd, &numPageSamples, sizeof(numPageSamples));
+    }
+
+    sp<AMessage> meta = ab->meta();
+
+    int64_t timeUs;
+    CHECK(mb->meta_data()->findInt64(kKeyTime, &timeUs));
+    meta->setInt64("timeUs", timeUs);
+
+    if (trackType == MEDIA_TRACK_TYPE_TIMEDTEXT) {
+        const char *mime;
+        CHECK(mTimedTextTrack.mSource != NULL
+                && mTimedTextTrack.mSource->getFormat()->findCString(kKeyMIMEType, &mime));
+        meta->setString("mime", mime);
+    }
+
+    int64_t durationUs;
+    if (mb->meta_data()->findInt64(kKeyDuration, &durationUs)) {
+        meta->setInt64("durationUs", durationUs);
+    }
+
+    if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
+        meta->setInt32("trackIndex", mSubtitleTrack.mIndex);
+    }
+
+    if (actualTimeUs) {
+        *actualTimeUs = timeUs;
+    }
+
+    mb->release();
+    mb = NULL;
+
+    return ab;
+}
+
 void NuPlayer::GenericSource::readBuffer(
-        bool audio, int64_t seekTimeUs, int64_t *actualTimeUs) {
-    Track *track = audio ? &mAudioTrack : &mVideoTrack;
-    CHECK(track->mSource != NULL);
+        media_track_type trackType, int64_t seekTimeUs, int64_t *actualTimeUs, bool formatChange) {
+    Track *track;
+    switch (trackType) {
+        case MEDIA_TRACK_TYPE_VIDEO:
+            track = &mVideoTrack;
+            break;
+        case MEDIA_TRACK_TYPE_AUDIO:
+            track = &mAudioTrack;
+            break;
+        case MEDIA_TRACK_TYPE_SUBTITLE:
+            track = &mSubtitleTrack;
+            break;
+        case MEDIA_TRACK_TYPE_TIMEDTEXT:
+            track = &mTimedTextTrack;
+            break;
+        default:
+            TRESPASS();
+    }
+
+    if (track->mSource == NULL) {
+        return;
+    }
 
     if (actualTimeUs) {
         *actualTimeUs = seekTimeUs;
@@ -276,10 +702,14 @@
     bool seeking = false;
 
     if (seekTimeUs >= 0) {
-        options.setSeekTo(seekTimeUs);
+        options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
         seeking = true;
     }
 
+    if (mIsWidevine && trackType != MEDIA_TRACK_TYPE_AUDIO) {
+        options.setNonBlocking();
+    }
+
     for (;;) {
         MediaBuffer *mbuf;
         status_t err = track->mSource->read(&mbuf, &options);
@@ -287,51 +717,25 @@
         options.clearSeekTo();
 
         if (err == OK) {
-            size_t outLength = mbuf->range_length();
-
-            if (audio && mAudioIsVorbis) {
-                outLength += sizeof(int32_t);
+            // formatChange && seeking: track whose source is changed during selection
+            // formatChange && !seeking: track whose source is not changed during selection
+            // !formatChange: normal seek
+            if ((seeking || formatChange)
+                    && (trackType == MEDIA_TRACK_TYPE_AUDIO
+                    || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
+                ATSParser::DiscontinuityType type = formatChange
+                        ? (seeking
+                                ? ATSParser::DISCONTINUITY_FORMATCHANGE
+                                : ATSParser::DISCONTINUITY_NONE)
+                        : ATSParser::DISCONTINUITY_SEEK;
+                track->mPackets->queueDiscontinuity( type, NULL, true /* discard */);
             }
 
-            sp<ABuffer> buffer = new ABuffer(outLength);
-
-            memcpy(buffer->data(),
-                   (const uint8_t *)mbuf->data() + mbuf->range_offset(),
-                   mbuf->range_length());
-
-            if (audio && mAudioIsVorbis) {
-                int32_t numPageSamples;
-                if (!mbuf->meta_data()->findInt32(
-                            kKeyValidSamples, &numPageSamples)) {
-                    numPageSamples = -1;
-                }
-
-                memcpy(buffer->data() + mbuf->range_length(),
-                       &numPageSamples,
-                       sizeof(numPageSamples));
-            }
-
-            int64_t timeUs;
-            CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
-
-            buffer->meta()->setInt64("timeUs", timeUs);
-
-            if (actualTimeUs) {
-                *actualTimeUs = timeUs;
-            }
-
-            mbuf->release();
-            mbuf = NULL;
-
-            if (seeking) {
-                track->mPackets->queueDiscontinuity(
-                        ATSParser::DISCONTINUITY_SEEK,
-                        NULL,
-                        true /* discard */);
-            }
-
+            sp<ABuffer> buffer = mediaBufferToABuffer(mbuf, trackType, actualTimeUs);
             track->mPackets->queueAccessUnit(buffer);
             break;
+        } else if (err == WOULD_BLOCK) {
+            break;
         } else if (err == INFO_FORMAT_CHANGED) {
 #if 0
             track->mPackets->queueDiscontinuity(
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index e0cd20f..3c5f55c 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -23,19 +23,25 @@
 
 #include "ATSParser.h"
 
+#include <media/mediaplayer.h>
+
 namespace android {
 
 struct AnotherPacketSource;
 struct ARTSPController;
 struct DataSource;
 struct MediaSource;
+class MediaBuffer;
 
 struct NuPlayer::GenericSource : public NuPlayer::Source {
     GenericSource(
             const sp<AMessage> &notify,
             const sp<IMediaHTTPService> &httpService,
             const char *url,
-            const KeyedVector<String8, String8> *headers);
+            const KeyedVector<String8, String8> *headers,
+            bool isWidevine = false,
+            bool uidValid = false,
+            uid_t uid = 0);
 
     GenericSource(
             const sp<AMessage> &notify,
@@ -52,14 +58,28 @@
     virtual status_t getDuration(int64_t *durationUs);
     virtual size_t getTrackCount() const;
     virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
+    virtual ssize_t getSelectedTrack(media_track_type type) const;
+    virtual status_t selectTrack(size_t trackIndex, bool select);
     virtual status_t seekTo(int64_t seekTimeUs);
 
+    virtual status_t setBuffers(bool audio, Vector<MediaBuffer *> &buffers);
+
 protected:
     virtual ~GenericSource();
 
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
     virtual sp<MetaData> getFormatMeta(bool audio);
 
 private:
+    enum {
+        kWhatFetchSubtitleData,
+        kWhatFetchTimedTextData,
+        kWhatSendSubtitleData,
+        kWhatSendTimedTextData,
+        kWhatChangeAVSource,
+    };
+
     Vector<sp<MediaSource> > mSources;
 
     struct Track {
@@ -70,15 +90,35 @@
 
     Track mAudioTrack;
     Track mVideoTrack;
+    Track mSubtitleTrack;
+    Track mTimedTextTrack;
 
+    int32_t mFetchSubtitleDataGeneration;
+    int32_t mFetchTimedTextDataGeneration;
     int64_t mDurationUs;
     bool mAudioIsVorbis;
+    bool mIsWidevine;
+    bool mUIDValid;
+    uid_t mUID;
 
     void initFromDataSource(const sp<DataSource> &dataSource);
 
+    void fetchTextData(
+            uint32_t what, media_track_type type,
+            int32_t curGen, sp<AnotherPacketSource> packets, sp<AMessage> msg);
+
+    void sendTextData(
+            uint32_t what, media_track_type type,
+            int32_t curGen, sp<AnotherPacketSource> packets, sp<AMessage> msg);
+
+    sp<ABuffer> mediaBufferToABuffer(
+            MediaBuffer *mbuf,
+            media_track_type trackType,
+            int64_t *actualTimeUs = NULL);
+
     void readBuffer(
-            bool audio,
-            int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL);
+            media_track_type trackType,
+            int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
 
     DISALLOW_EVIL_CONSTRUCTORS(GenericSource);
 };
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 88c59bf..58d0138 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -29,6 +29,7 @@
 #include "RTSPSource.h"
 #include "StreamingSource.h"
 #include "GenericSource.h"
+#include "TextDescriptions.h"
 
 #include "ATSParser.h"
 
@@ -36,6 +37,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
@@ -150,6 +152,7 @@
       mScanSourcesPending(false),
       mScanSourcesGeneration(0),
       mPollDurationGeneration(0),
+      mTimedTextGeneration(0),
       mTimeDiscontinuityPending(false),
       mFlushingAudio(NONE),
       mFlushingVideo(NONE),
@@ -221,6 +224,10 @@
                     || strstr(url, ".sdp?"))) {
         source = new RTSPSource(
                 notify, httpService, url, headers, mUIDValid, mUID, true);
+    } else if ((!strncasecmp(url, "widevine://", 11))) {
+        source = new GenericSource(notify, httpService, url, headers,
+                true /* isWidevine */, mUIDValid, mUID);
+        mSourceFlags |= Source::FLAG_SECURE;
     } else {
         source = new GenericSource(notify, httpService, url, headers);
     }
@@ -423,6 +430,16 @@
 
             if (trackIndex < inbandTracks) {
                 err = mSource->selectTrack(trackIndex, select);
+
+                if (!select && err == OK) {
+                    int32_t type;
+                    sp<AMessage> info = mSource->getTrackInfo(trackIndex);
+                    if (info != NULL
+                            && info->findInt32("type", &type)
+                            && type == MEDIA_TRACK_TYPE_TIMEDTEXT) {
+                        ++mTimedTextGeneration;
+                    }
+                }
             } else {
                 trackIndex -= inbandTracks;
 
@@ -512,6 +529,17 @@
             mNumFramesDropped = 0;
             mStarted = true;
 
+            /* instantiate decoders now for secure playback */
+            if (mSourceFlags & Source::FLAG_SECURE) {
+                if (mNativeWindow != NULL) {
+                    instantiateDecoder(false, &mVideoDecoder);
+                }
+
+                if (mAudioSink != NULL) {
+                    instantiateDecoder(true, &mAudioDecoder);
+                }
+            }
+
             mSource->start();
 
             uint32_t flags = 0;
@@ -540,7 +568,10 @@
                     new AMessage(kWhatRendererNotify, id()),
                     flags);
 
-            looper()->registerHandler(mRenderer);
+            mRendererLooper = new ALooper;
+            mRendererLooper->setName("NuPlayerRenderer");
+            mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+            mRendererLooper->registerHandler(mRenderer);
 
             postScanSources();
             break;
@@ -735,6 +766,7 @@
                             offloadInfo.has_video = (mVideoDecoder != NULL);
                             offloadInfo.is_streaming = true;
 
+                            ALOGV("try to open AudioSink in offload mode");
                             err = mAudioSink->open(
                                     sampleRate,
                                     numChannels,
@@ -774,6 +806,7 @@
 
                     if (!mOffloadAudio) {
                         flags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+                        ALOGV("open AudioSink in NON-offload mode");
                         CHECK_EQ(mAudioSink->open(
                                     sampleRate,
                                     numChannels,
@@ -827,8 +860,23 @@
                               displayWidth, displayHeight);
                     }
 
-                    notifyListener(
-                            MEDIA_SET_VIDEO_SIZE, displayWidth, displayHeight);
+                    int32_t rotationDegrees;
+                    if (!videoInputFormat->findInt32(
+                            "rotation-degrees", &rotationDegrees)) {
+                        rotationDegrees = 0;
+                    }
+
+                    if (rotationDegrees == 90 || rotationDegrees == 270) {
+                        notifyListener(
+                                MEDIA_SET_VIDEO_SIZE,
+                                displayHeight,
+                                displayWidth);
+                    } else {
+                        notifyListener(
+                                MEDIA_SET_VIDEO_SIZE,
+                                displayWidth,
+                                displayHeight);
+                    }
                 }
             } else if (what == Decoder::kWhatShutdownCompleted) {
                 ALOGV("%s shutdown completed", audio ? "audio" : "video");
@@ -921,6 +969,21 @@
             } else if (what == Renderer::kWhatMediaRenderingStart) {
                 ALOGV("media rendering started");
                 notifyListener(MEDIA_STARTED, 0, 0);
+            } else if (what == Renderer::kWhatAudioOffloadTearDown) {
+                ALOGV("Tear down audio offload, fall back to s/w path");
+                int64_t positionUs;
+                CHECK(msg->findInt64("positionUs", &positionUs));
+                mAudioSink->close();
+                mAudioDecoder.clear();
+                mRenderer->flush(true /* audio */);
+                if (mVideoDecoder != NULL) {
+                    mRenderer->flush(false /* audio */);
+                }
+                mRenderer->signalDisableOffloadAudio();
+                mOffloadAudio = false;
+
+                performSeek(positionUs);
+                instantiateDecoder(true /* audio */, &mAudioDecoder);
             }
             break;
         }
@@ -1055,6 +1118,10 @@
 
         sp<AMessage> ccNotify = new AMessage(kWhatClosedCaptionNotify, id());
         mCCDecoder = new CCDecoder(ccNotify);
+
+        if (mSourceFlags & Source::FLAG_SECURE) {
+            format->setInt32("secure", true);
+        }
     }
 
     sp<AMessage> notify =
@@ -1073,6 +1140,28 @@
     (*decoder)->init();
     (*decoder)->configure(format);
 
+    // allocate buffers to decrypt widevine source buffers
+    if (!audio && (mSourceFlags & Source::FLAG_SECURE)) {
+        Vector<sp<ABuffer> > inputBufs;
+        CHECK_EQ((*decoder)->getInputBuffers(&inputBufs), (status_t)OK);
+
+        Vector<MediaBuffer *> mediaBufs;
+        for (size_t i = 0; i < inputBufs.size(); i++) {
+            const sp<ABuffer> &buffer = inputBufs[i];
+            MediaBuffer *mbuf = new MediaBuffer(buffer->data(), buffer->size());
+            mediaBufs.push(mbuf);
+        }
+
+        status_t err = mSource->setBuffers(audio, mediaBufs);
+        if (err != OK) {
+            for (size_t i = 0; i < mediaBufs.size(); ++i) {
+                mediaBufs[i]->release();
+            }
+            mediaBufs.clear();
+            ALOGE("Secure source didn't support secure mediaBufs.");
+            return err;
+        }
+    }
     return OK;
 }
 
@@ -1141,14 +1230,15 @@
                 mTimeDiscontinuityPending =
                     mTimeDiscontinuityPending || timeChange;
 
+                if (mFlushingAudio == NONE && mFlushingVideo == NONE) {
+                    // And we'll resume scanning sources once we're done
+                    // flushing.
+                    mDeferredActions.push_front(
+                            new SimpleAction(
+                                &NuPlayer::performScanSources));
+                }
+
                 if (formatChange || timeChange) {
-                    if (mFlushingAudio == NONE && mFlushingVideo == NONE) {
-                        // And we'll resume scanning sources once we're done
-                        // flushing.
-                        mDeferredActions.push_front(
-                                new SimpleAction(
-                                    &NuPlayer::performScanSources));
-                    }
 
                     sp<AMessage> newFormat = mSource->getFormat(audio);
                     sp<Decoder> &decoder = audio ? mAudioDecoder : mVideoDecoder;
@@ -1184,6 +1274,7 @@
 
         dropAccessUnit = false;
         if (!audio
+                && !(mSourceFlags & Source::FLAG_SECURE)
                 && mVideoLateByUs > 100000ll
                 && mVideoIsAVC
                 && !IsAVCReferenceFrame(accessUnit)) {
@@ -1429,6 +1520,7 @@
           seekTimeUs / 1E6);
 
     mSource->seekTo(seekTimeUs);
+    ++mTimedTextGeneration;
 
     if (mDriver != NULL) {
         sp<NuPlayerDriver> driver = mDriver.promote();
@@ -1497,6 +1589,13 @@
     ++mScanSourcesGeneration;
     mScanSourcesPending = false;
 
+    if (mRendererLooper != NULL) {
+        if (mRenderer != NULL) {
+            mRendererLooper->unregisterHandler(mRenderer->id());
+        }
+        mRendererLooper->stop();
+        mRendererLooper.clear();
+    }
     mRenderer.clear();
 
     if (mSource != NULL) {
@@ -1630,6 +1729,39 @@
             break;
         }
 
+        case Source::kWhatTimedTextData:
+        {
+            int32_t generation;
+            if (msg->findInt32("generation", &generation)
+                    && generation != mTimedTextGeneration) {
+                break;
+            }
+
+            sp<ABuffer> buffer;
+            CHECK(msg->findBuffer("buffer", &buffer));
+
+            sp<NuPlayerDriver> driver = mDriver.promote();
+            if (driver == NULL) {
+                break;
+            }
+
+            int posMs;
+            int64_t timeUs, posUs;
+            driver->getCurrentPosition(&posMs);
+            posUs = posMs * 1000;
+            CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+            if (posUs < timeUs) {
+                if (!msg->findInt32("generation", &generation)) {
+                    msg->setInt32("generation", mTimedTextGeneration);
+                }
+                msg->post(timeUs - posUs);
+            } else {
+                sendTimedTextData(buffer);
+            }
+            break;
+        }
+
         case Source::kWhatQueueDecoderShutdown:
         {
             int32_t audio, video;
@@ -1698,6 +1830,34 @@
 
     notifyListener(MEDIA_SUBTITLE_DATA, 0, 0, &in);
 }
+
+void NuPlayer::sendTimedTextData(const sp<ABuffer> &buffer) {
+    const void *data;
+    size_t size = 0;
+    int64_t timeUs;
+    int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS;
+
+    AString mime;
+    CHECK(buffer->meta()->findString("mime", &mime));
+    CHECK(strcasecmp(mime.c_str(), MEDIA_MIMETYPE_TEXT_3GPP) == 0);
+
+    data = buffer->data();
+    size = buffer->size();
+
+    Parcel parcel;
+    if (size > 0) {
+        CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+        flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
+        TextDescriptions::getParcelOfDescriptions(
+                (const uint8_t *)data, size, flag, timeUs / 1000, &parcel);
+    }
+
+    if ((parcel.dataSize() > 0)) {
+        notifyListener(MEDIA_TIMED_TEXT, 0, 0, &parcel);
+    } else {  // send an empty timed text
+        notifyListener(MEDIA_TIMED_TEXT, 0, 0);
+    }
+}
 ////////////////////////////////////////////////////////////////////////////////
 
 void NuPlayer::Source::notifyFlagsChanged(uint32_t flags) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index d7c00aa..8bcf10e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -125,6 +125,7 @@
     sp<Decoder> mAudioDecoder;
     sp<CCDecoder> mCCDecoder;
     sp<Renderer> mRenderer;
+    sp<ALooper> mRendererLooper;
 
     List<sp<Action> > mDeferredActions;
 
@@ -135,6 +136,7 @@
     int32_t mScanSourcesGeneration;
 
     int32_t mPollDurationGeneration;
+    int32_t mTimedTextGeneration;
 
     enum FlushStatus {
         NONE,
@@ -197,6 +199,7 @@
             bool audio, bool video, const sp<AMessage> &reply);
 
     void sendSubtitleData(const sp<ABuffer> &buffer, int32_t baseIndex);
+    void sendTimedTextData(const sp<ABuffer> &buffer);
 
     void writeTrackInfo(Parcel* reply, const sp<AMessage> format) const;
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index dd73cc4..8fce2f4 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -26,6 +26,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
@@ -54,6 +55,22 @@
 NuPlayer::Decoder::~Decoder() {
 }
 
+static
+status_t PostAndAwaitResponse(
+        const sp<AMessage> &msg, sp<AMessage> *response) {
+    status_t err = msg->postAndAwaitResponse(response);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (!(*response)->findInt32("err", &err)) {
+        err = OK;
+    }
+
+    return err;
+}
+
 void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) {
     CHECK(mCodec == NULL);
 
@@ -72,8 +89,20 @@
     ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), surface.get());
 
     mCodec = MediaCodec::CreateByType(mCodecLooper, mime.c_str(), false /* encoder */);
+    int32_t secure = 0;
+    if (format->findInt32("secure", &secure) && secure != 0) {
+        if (mCodec != NULL) {
+            mCodec->getName(&mComponentName);
+            mComponentName.append(".secure");
+            mCodec->release();
+            ALOGI("[%s] creating", mComponentName.c_str());
+            mCodec = MediaCodec::CreateByComponentName(
+                    mCodecLooper, mComponentName.c_str());
+        }
+    }
     if (mCodec == NULL) {
-        ALOGE("Failed to create %s decoder", mime.c_str());
+        ALOGE("Failed to create %s%s decoder",
+                (secure ? "secure " : ""), mime.c_str());
         handleError(UNKNOWN_ERROR);
         return;
     }
@@ -107,6 +136,7 @@
 
     // the following should work after start
     CHECK_EQ((status_t)OK, mCodec->getInputBuffers(&mInputBuffers));
+    releaseAndResetMediaBuffers();
     CHECK_EQ((status_t)OK, mCodec->getOutputBuffers(&mOutputBuffers));
     ALOGV("[%s] got %zu input and %zu output buffers",
             mComponentName.c_str(),
@@ -117,6 +147,24 @@
     mPaused = false;
 }
 
+void NuPlayer::Decoder::releaseAndResetMediaBuffers() {
+    for (size_t i = 0; i < mMediaBuffers.size(); i++) {
+        if (mMediaBuffers[i] != NULL) {
+            mMediaBuffers[i]->release();
+            mMediaBuffers.editItemAt(i) = NULL;
+        }
+    }
+    mMediaBuffers.resize(mInputBuffers.size());
+    for (size_t i = 0; i < mMediaBuffers.size(); i++) {
+        mMediaBuffers.editItemAt(i) = NULL;
+    }
+    mInputBufferIsDequeued.clear();
+    mInputBufferIsDequeued.resize(mInputBuffers.size());
+    for (size_t i = 0; i < mInputBufferIsDequeued.size(); i++) {
+        mInputBufferIsDequeued.editItemAt(i) = false;
+    }
+}
+
 void NuPlayer::Decoder::requestCodecNotification() {
     if (mCodec != NULL) {
         sp<AMessage> reply = new AMessage(kWhatCodecNotify, id());
@@ -141,6 +189,14 @@
     msg->post();
 }
 
+status_t NuPlayer::Decoder::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
+    sp<AMessage> msg = new AMessage(kWhatGetInputBuffers, id());
+    msg->setPointer("buffers", buffers);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
 void NuPlayer::Decoder::handleError(int32_t err)
 {
     sp<AMessage> notify = mNotify->dup();
@@ -163,6 +219,12 @@
 
     CHECK_LT(bufferIx, mInputBuffers.size());
 
+    if (mMediaBuffers[bufferIx] != NULL) {
+        mMediaBuffers[bufferIx]->release();
+        mMediaBuffers.editItemAt(bufferIx) = NULL;
+    }
+    mInputBufferIsDequeued.editItemAt(bufferIx) = true;
+
     sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, id());
     reply->setSize("buffer-ix", bufferIx);
     reply->setInt32("generation", mBufferGeneration);
@@ -183,6 +245,44 @@
 
     sp<ABuffer> buffer;
     bool hasBuffer = msg->findBuffer("buffer", &buffer);
+
+    // handle widevine classic source - that fills an arbitrary input buffer
+    MediaBuffer *mediaBuffer = NULL;
+    if (hasBuffer && buffer->meta()->findPointer(
+            "mediaBuffer", (void **)&mediaBuffer)) {
+        if (mediaBuffer == NULL) {
+            // received no actual buffer
+            ALOGW("[%s] received null MediaBuffer %s",
+                    mComponentName.c_str(), msg->debugString().c_str());
+            buffer = NULL;
+        } else {
+            // likely filled another buffer than we requested: adjust buffer index
+            size_t ix;
+            for (ix = 0; ix < mInputBuffers.size(); ix++) {
+                const sp<ABuffer> &buf = mInputBuffers[ix];
+                if (buf->data() == mediaBuffer->data()) {
+                    // all input buffers are dequeued on start, hence the check
+                    CHECK(mInputBufferIsDequeued[ix]);
+                    ALOGV("[%s] received MediaBuffer for #%zu instead of #%zu",
+                            mComponentName.c_str(), ix, bufferIx);
+
+                    // TRICKY: need buffer for the metadata, so instead, set
+                    // codecBuffer to the same (though incorrect) buffer to
+                    // avoid a memcpy into the codecBuffer
+                    codecBuffer = buffer;
+                    codecBuffer->setRange(
+                            mediaBuffer->range_offset(),
+                            mediaBuffer->range_length());
+                    bufferIx = ix;
+                    break;
+                }
+            }
+            CHECK(ix < mInputBuffers.size());
+        }
+    }
+
+    mInputBufferIsDequeued.editItemAt(bufferIx) = false;
+
     if (buffer == NULL /* includes !hasBuffer */) {
         int32_t streamErr = ERROR_END_OF_STREAM;
         CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
@@ -236,6 +336,11 @@
                     mComponentName.c_str(), err);
             handleError(err);
         }
+
+        if (mediaBuffer != NULL) {
+            CHECK(mMediaBuffers[bufferIx] == NULL);
+            mMediaBuffers.editItemAt(bufferIx) = mediaBuffer;
+        }
     }
 }
 
@@ -352,6 +457,8 @@
         return;
     }
 
+    releaseAndResetMediaBuffers();
+
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatFlushCompleted);
     notify->post();
@@ -379,6 +486,8 @@
         mComponentName = "decoder";
     }
 
+    releaseAndResetMediaBuffers();
+
     if (err != OK) {
         ALOGE("failed to release %s (err=%d)", mComponentName.c_str(), err);
         handleError(err);
@@ -403,6 +512,23 @@
             break;
         }
 
+        case kWhatGetInputBuffers:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            Vector<sp<ABuffer> > *dstBuffers;
+            CHECK(msg->findPointer("buffers", (void **)&dstBuffers));
+
+            dstBuffers->clear();
+            for (size_t i = 0; i < mInputBuffers.size(); i++) {
+                dstBuffers->push(mInputBuffers[i]);
+            }
+
+            (new AMessage)->postReply(replyID);
+            break;
+        }
+
         case kWhatCodecNotify:
         {
             if (!isStaleReply(msg)) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 4fa0dbd..c6fc237 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -26,6 +26,7 @@
 
 struct ABuffer;
 struct MediaCodec;
+struct MediaBuffer;
 
 struct NuPlayer::Decoder : public AHandler {
     Decoder(const sp<AMessage> &notify,
@@ -34,6 +35,7 @@
     virtual void configure(const sp<AMessage> &format);
     virtual void init();
 
+    status_t getInputBuffers(Vector<sp<ABuffer> > *dstBuffers) const;
     virtual void signalFlush();
     virtual void signalResume();
     virtual void initiateShutdown();
@@ -60,6 +62,7 @@
     enum {
         kWhatCodecNotify        = 'cdcN',
         kWhatConfigure          = 'conf',
+        kWhatGetInputBuffers    = 'gInB',
         kWhatInputBufferFilled  = 'inpF',
         kWhatRenderBuffer       = 'rndr',
         kWhatFlush              = 'flus',
@@ -77,11 +80,14 @@
 
     Vector<sp<ABuffer> > mInputBuffers;
     Vector<sp<ABuffer> > mOutputBuffers;
+    Vector<bool> mInputBufferIsDequeued;
+    Vector<MediaBuffer *> mMediaBuffers;
 
     void handleError(int32_t err);
     bool handleAnInputBuffer();
     bool handleAnOutputBuffer();
 
+    void releaseAndResetMediaBuffers();
     void requestCodecNotification();
     bool isStaleReply(const sp<AMessage> &msg);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 280b5af..4748546 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -42,6 +42,7 @@
       mLooper(new ALooper),
       mPlayerFlags(0),
       mAtEOS(false),
+      mLooping(false),
       mStartupSeekTimeUs(-1) {
     mLooper->setName("NuPlayerDriver Looper");
 
@@ -76,6 +77,7 @@
         const KeyedVector<String8, String8> *headers) {
     Mutex::Autolock autoLock(mLock);
 
+    ALOGV("setDataSource: url=%s", url);
     if (mState != STATE_IDLE) {
         return INVALID_OPERATION;
     }
@@ -94,6 +96,7 @@
 status_t NuPlayerDriver::setDataSource(int fd, int64_t offset, int64_t length) {
     Mutex::Autolock autoLock(mLock);
 
+    ALOGV("setDataSource: fd=%d", fd);
     if (mState != STATE_IDLE) {
         return INVALID_OPERATION;
     }
@@ -112,6 +115,7 @@
 status_t NuPlayerDriver::setDataSource(const sp<IStreamSource> &source) {
     Mutex::Autolock autoLock(mLock);
 
+    ALOGV("setDataSource: stream source");
     if (mState != STATE_IDLE) {
         return INVALID_OPERATION;
     }
@@ -367,12 +371,14 @@
     mDurationUs = -1;
     mPositionUs = -1;
     mStartupSeekTimeUs = -1;
+    mLooping = false;
 
     return OK;
 }
 
-status_t NuPlayerDriver::setLooping(int /* loop */) {
-    return INVALID_OPERATION;
+status_t NuPlayerDriver::setLooping(int loop) {
+    mLooping = loop != 0;
+    return OK;
 }
 
 player_type NuPlayerDriver::playerType() {
@@ -523,8 +529,24 @@
 
 void NuPlayerDriver::notifyListener(
         int msg, int ext1, int ext2, const Parcel *in) {
-    if (msg == MEDIA_PLAYBACK_COMPLETE || msg == MEDIA_ERROR) {
-        mAtEOS = true;
+    switch (msg) {
+        case MEDIA_PLAYBACK_COMPLETE:
+        {
+            if (mLooping) {
+                mPlayer->seekToAsync(0);
+                break;
+            }
+            // fall through
+        }
+
+        case MEDIA_ERROR:
+        {
+            mAtEOS = true;
+            break;
+        }
+
+        default:
+            break;
     }
 
     sendEvent(msg, ext1, ext2, in);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index 0148fb1..9424aae 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -111,6 +111,7 @@
     uint32_t mPlayerFlags;
 
     bool mAtEOS;
+    bool mLooping;
 
     int64_t mStartupSeekTimeUs;
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index f520ff7..3640038 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -26,6 +26,8 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 
+#include <inttypes.h>
+
 namespace android {
 
 // static
@@ -221,6 +223,12 @@
             break;
         }
 
+        case kWhatAudioOffloadTearDown:
+        {
+            onAudioOffloadTearDown();
+            break;
+        }
+
         default:
             TRESPASS();
             break;
@@ -292,7 +300,7 @@
 
         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
         {
-            // TODO: send this to player.
+            me->notifyAudioOffloadTearDown();
             break;
         }
     }
@@ -502,6 +510,7 @@
         }
     }
 
+    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
     msg->post(delayUs);
 
     mDrainVideoQueuePending = true;
@@ -579,6 +588,10 @@
     notify->post();
 }
 
+void NuPlayer::Renderer::notifyAudioOffloadTearDown() {
+    (new AMessage(kWhatAudioOffloadTearDown, id()))->post();
+}
+
 void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
     int32_t audio;
     CHECK(msg->findInt32("audio", &audio));
@@ -811,6 +824,7 @@
 void NuPlayer::Renderer::onDisableOffloadAudio() {
     Mutex::Autolock autoLock(mLock);
     mFlags &= ~FLAG_OFFLOAD_AUDIO;
+    ++mAudioQueueGeneration;
 }
 
 void NuPlayer::Renderer::notifyPosition() {
@@ -877,5 +891,21 @@
     }
 }
 
+void NuPlayer::Renderer::onAudioOffloadTearDown() {
+    uint32_t numFramesPlayed;
+    CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
+
+    int64_t currentPositionUs = mFirstAudioTimeUs
+            + (numFramesPlayed * mAudioSink->msecsPerFrame()) * 1000ll;
+
+    mAudioSink->stop();
+    mAudioSink->flush();
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatAudioOffloadTearDown);
+    notify->setInt64("positionUs", currentPositionUs);
+    notify->post();
+}
+
 }  // namespace android
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 6e86a8f..1cba1a0 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -62,6 +62,7 @@
         kWhatPosition            = 'posi',
         kWhatVideoRenderingStart = 'vdrd',
         kWhatMediaRenderingStart = 'mdrd',
+        kWhatAudioOffloadTearDown = 'aOTD',
     };
 
 protected:
@@ -143,12 +144,14 @@
     void onDisableOffloadAudio();
     void onPause();
     void onResume();
+    void onAudioOffloadTearDown();
 
     void notifyEOS(bool audio, status_t finalResult);
     void notifyFlushComplete(bool audio);
     void notifyPosition();
     void notifyVideoLateBy(int64_t lateByUs);
     void notifyVideoRenderingStart();
+    void notifyAudioOffloadTearDown();
 
     void flushQueue(List<QueueEntry> *queue);
     bool dropBufferWhileFlushing(bool audio, const sp<AMessage> &msg);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 632c4a6..0ec017e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -21,11 +21,14 @@
 #include "NuPlayer.h"
 
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/Vector.h>
 
 namespace android {
 
 struct ABuffer;
 struct MetaData;
+struct MediaBuffer;
 
 struct NuPlayer::Source : public AHandler {
     enum Flags {
@@ -34,6 +37,7 @@
         FLAG_CAN_SEEK_FORWARD   = 4,  // the "10 sec forward button"
         FLAG_CAN_SEEK           = 8,  // the "seek bar"
         FLAG_DYNAMIC_DURATION   = 16,
+        FLAG_SECURE             = 32,
     };
 
     enum {
@@ -43,6 +47,7 @@
         kWhatBufferingStart,
         kWhatBufferingEnd,
         kWhatSubtitleData,
+        kWhatTimedTextData,
         kWhatQueueDecoderShutdown,
     };
 
@@ -89,6 +94,10 @@
         return INVALID_OPERATION;
     }
 
+    virtual status_t setBuffers(bool /* audio */, Vector<MediaBuffer *> &/* buffers */) {
+        return INVALID_OPERATION;
+    }
+
     virtual bool isRealTime() const {
         return false;
     }
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 9c64d72..5b6e59e 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -368,6 +368,7 @@
       mExplicitShutdown(false),
       mEncoderDelay(0),
       mEncoderPadding(0),
+      mRotationDegrees(0),
       mChannelMaskPresent(false),
       mChannelMask(0),
       mDequeueCounter(0),
@@ -591,6 +592,27 @@
         return err;
     }
 
+    if (mRotationDegrees != 0) {
+        uint32_t transform = 0;
+        switch (mRotationDegrees) {
+            case 0: transform = 0; break;
+            case 90: transform = HAL_TRANSFORM_ROT_90; break;
+            case 180: transform = HAL_TRANSFORM_ROT_180; break;
+            case 270: transform = HAL_TRANSFORM_ROT_270; break;
+            default: transform = 0; break;
+        }
+
+        if (transform > 0) {
+            err = native_window_set_buffers_transform(
+                    mNativeWindow.get(), transform);
+            if (err != 0) {
+                ALOGE("native_window_set_buffers_transform failed: %s (%d)",
+                        strerror(-err), -err);
+                return err;
+            }
+        }
+    }
+
     // Set up the native window.
     OMX_U32 usage = 0;
     err = mOMX->getGraphicBufferUsage(mNode, kPortIndexOutput, &usage);
@@ -1232,19 +1254,20 @@
                 && push != 0) {
             mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
         }
+
+        int32_t rotationDegrees;
+        if (msg->findInt32("rotation-degrees", &rotationDegrees)) {
+            mRotationDegrees = rotationDegrees;
+        } else {
+            mRotationDegrees = 0;
+        }
     }
 
     if (video) {
         if (encoder) {
             err = setupVideoEncoder(mime, msg);
         } else {
-            int32_t width, height;
-            if (!msg->findInt32("width", &width)
-                    || !msg->findInt32("height", &height)) {
-                err = INVALID_OPERATION;
-            } else {
-                err = setupVideoDecoder(mime, width, height);
-            }
+            err = setupVideoDecoder(mime, msg);
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
         int32_t numChannels, sampleRate;
@@ -1854,7 +1877,13 @@
 }
 
 status_t ACodec::setupVideoDecoder(
-        const char *mime, int32_t width, int32_t height) {
+        const char *mime, const sp<AMessage> &msg) {
+    int32_t width, height;
+    if (!msg->findInt32("width", &width)
+            || !msg->findInt32("height", &height)) {
+        return INVALID_OPERATION;
+    }
+
     OMX_VIDEO_CODINGTYPE compressionFormat;
     status_t err = GetVideoCodingTypeFromMime(mime, &compressionFormat);
 
@@ -1869,7 +1898,20 @@
         return err;
     }
 
-    err = setSupportedOutputFormat();
+    int32_t tmp;
+    if (msg->findInt32("color-format", &tmp)) {
+        OMX_COLOR_FORMATTYPE colorFormat =
+            static_cast<OMX_COLOR_FORMATTYPE>(tmp);
+        err = setVideoPortFormatType(
+                kPortIndexOutput, OMX_VIDEO_CodingUnused, colorFormat);
+        if (err != OK) {
+            ALOGW("[%s] does not support color format %d",
+                  mComponentName.c_str(), colorFormat);
+            err = setSupportedOutputFormat();
+        }
+    } else {
+        err = setSupportedOutputFormat();
+    }
 
     if (err != OK) {
         return err;
@@ -2709,6 +2751,83 @@
     }
 }
 
+// static
+void ACodec::describeDefaultColorFormat(DescribeColorFormatParams &params) {
+    MediaImage &image = params.sMediaImage;
+    memset(&image, 0, sizeof(image));
+
+    image.mType = MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN;
+    image.mNumPlanes = 0;
+
+    const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
+    // we need stride and slice-height to be non-zero
+    if (params.nStride == 0 || params.nSliceHeight == 0) {
+        ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
+                fmt, fmt, params.nStride, params.nSliceHeight);
+        return;
+    }
+
+    image.mWidth = params.nFrameWidth;
+    image.mHeight = params.nFrameHeight;
+
+    // only supporting YUV420
+    if (fmt != OMX_COLOR_FormatYUV420Planar &&
+        fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
+        fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
+        fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar) {
+        ALOGW("do not know color format 0x%x = %d", fmt, fmt);
+        return;
+    }
+
+    // set-up YUV format
+    image.mType = MediaImage::MEDIA_IMAGE_TYPE_YUV;
+    image.mNumPlanes = 3;
+    image.mBitDepth = 8;
+    image.mPlane[image.Y].mOffset = 0;
+    image.mPlane[image.Y].mColInc = 1;
+    image.mPlane[image.Y].mRowInc = params.nStride;
+    image.mPlane[image.Y].mHorizSubsampling = 1;
+    image.mPlane[image.Y].mVertSubsampling = 1;
+
+    switch (fmt) {
+        case OMX_COLOR_FormatYUV420Planar: // used for YV12
+        case OMX_COLOR_FormatYUV420PackedPlanar:
+            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
+            image.mPlane[image.U].mColInc = 1;
+            image.mPlane[image.U].mRowInc = params.nStride / 2;
+            image.mPlane[image.U].mHorizSubsampling = 2;
+            image.mPlane[image.U].mVertSubsampling = 2;
+
+            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset
+                    + (params.nStride * params.nSliceHeight / 4);
+            image.mPlane[image.V].mColInc = 1;
+            image.mPlane[image.V].mRowInc = params.nStride / 2;
+            image.mPlane[image.V].mHorizSubsampling = 2;
+            image.mPlane[image.V].mVertSubsampling = 2;
+            break;
+
+        case OMX_COLOR_FormatYUV420SemiPlanar:
+            // FIXME: NV21 for sw-encoder, NV12 for decoder and hw-encoder
+        case OMX_COLOR_FormatYUV420PackedSemiPlanar:
+            // NV12
+            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
+            image.mPlane[image.U].mColInc = 2;
+            image.mPlane[image.U].mRowInc = params.nStride;
+            image.mPlane[image.U].mHorizSubsampling = 2;
+            image.mPlane[image.U].mVertSubsampling = 2;
+
+            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset + 1;
+            image.mPlane[image.V].mColInc = 2;
+            image.mPlane[image.V].mRowInc = params.nStride;
+            image.mPlane[image.V].mHorizSubsampling = 2;
+            image.mPlane[image.V].mVertSubsampling = 2;
+            break;
+
+        default:
+            TRESPASS();
+    }
+}
+
 status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
     // TODO: catch errors an return them instead of using CHECK
     OMX_PARAM_PORTDEFINITIONTYPE def;
@@ -2736,6 +2855,33 @@
                     notify->setInt32("slice-height", videoDef->nSliceHeight);
                     notify->setInt32("color-format", videoDef->eColorFormat);
 
+
+                    DescribeColorFormatParams describeParams;
+                    InitOMXParams(&describeParams);
+                    describeParams.eColorFormat = videoDef->eColorFormat;
+                    describeParams.nFrameWidth = videoDef->nFrameWidth;
+                    describeParams.nFrameHeight = videoDef->nFrameHeight;
+                    describeParams.nStride = videoDef->nStride;
+                    describeParams.nSliceHeight = videoDef->nSliceHeight;
+
+                    OMX_INDEXTYPE describeColorFormatIndex;
+                    if (mOMX->getExtensionIndex(
+                            mNode, "OMX.google.android.index.describeColorFormat",
+                            &describeColorFormatIndex) ||
+                        mOMX->getParameter(
+                            mNode, describeColorFormatIndex,
+                            &describeParams, sizeof(describeParams))) {
+                        describeDefaultColorFormat(describeParams);
+                    }
+
+                    if (describeParams.sMediaImage.mType != MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN) {
+                        notify->setBuffer(
+                                "image-data",
+                                ABuffer::CreateAsCopy(
+                                        &describeParams.sMediaImage,
+                                        sizeof(describeParams.sMediaImage)));
+                    }
+
                     OMX_CONFIG_RECTTYPE rect;
                     InitOMXParams(&rect);
                     rect.nPortIndex = kPortIndexOutput;
@@ -2765,6 +2911,50 @@
 
                     break;
                 }
+
+                case OMX_VIDEO_CodingVP8:
+                case OMX_VIDEO_CodingVP9:
+                {
+                    OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
+                    InitOMXParams(&vp8type);
+                    vp8type.nPortIndex = kPortIndexOutput;
+                    status_t err = mOMX->getParameter(
+                            mNode,
+                            (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
+                            &vp8type,
+                            sizeof(vp8type));
+
+                    if (err == OK) {
+                        AString tsSchema = "none";
+                        if (vp8type.eTemporalPattern
+                                == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+                            switch (vp8type.nTemporalLayerCount) {
+                                case 1:
+                                {
+                                    tsSchema = "webrtc.vp8.1-layer";
+                                    break;
+                                }
+                                case 2:
+                                {
+                                    tsSchema = "webrtc.vp8.2-layer";
+                                    break;
+                                }
+                                case 3:
+                                {
+                                    tsSchema = "webrtc.vp8.3-layer";
+                                    break;
+                                }
+                                default:
+                                {
+                                    break;
+                                }
+                            }
+                        }
+                        notify->setString("ts-schema", tsSchema);
+                    }
+                    // Fall through to set up mime.
+                }
+
                 default:
                 {
                     CHECK(mIsEncoder ^ (portIndex == kPortIndexInput));
@@ -3989,6 +4179,8 @@
 
         if (err == OK) {
             break;
+        } else {
+            ALOGW("Allocating component '%s' failed, try next one.", componentName.c_str());
         }
 
         node = NULL;
@@ -4504,11 +4696,14 @@
 
     submitOutputBuffers();
 
-    // Post the first input buffer.
+    // Post all available input buffers
     CHECK_GT(mCodec->mBuffers[kPortIndexInput].size(), 0u);
-    BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(0);
-
-    postFillThisBuffer(info);
+    for (size_t i = 0; i < mCodec->mBuffers[kPortIndexInput].size(); i++) {
+        BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(i);
+        if (info->mStatus == BufferInfo::OWNED_BY_US) {
+            postFillThisBuffer(info);
+        }
+    }
 
     mActive = true;
 }
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index d9aed01..a67fabe 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -159,6 +159,8 @@
     waitOutstandingEncodingFrames_l();
     releaseQueuedFrames_l();
 
+    mFrameAvailableCondition.signal();
+
     return OK;
 }
 
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 207acc8..0064293 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -489,7 +489,9 @@
         off64_t orig_offset = offset;
         err = parseChunk(&offset, 0);
 
-        if (offset <= orig_offset) {
+        if (err != OK && err != UNKNOWN_ERROR) {
+            break;
+        } else if (offset <= orig_offset) {
             // only continue parsing if the offset was advanced,
             // otherwise we might end up in an infinite loop
             ALOGE("did not advance: 0x%lld->0x%lld", orig_offset, offset);
@@ -497,9 +499,8 @@
             break;
         } else if (err == OK) {
             continue;
-        } else if (err != UNKNOWN_ERROR) {
-            break;
         }
+
         uint32_t hdr[2];
         if (mDataSource->readAt(offset, hdr, 8) < 8) {
             break;
@@ -3665,7 +3666,7 @@
 
         uint32_t sampleIndex;
         status_t err = mSampleTable->findSampleAtTime(
-                seekTimeUs * mTimescale / 1000000,
+                seekTimeUs, 1000000, mTimescale,
                 &sampleIndex, findFlags);
 
         if (mode == ReadOptions::SEEK_CLOSEST) {
diff --git a/media/libstagefright/MediaBufferGroup.cpp b/media/libstagefright/MediaBufferGroup.cpp
index 80aae51..6ac6d4a 100644
--- a/media/libstagefright/MediaBufferGroup.cpp
+++ b/media/libstagefright/MediaBufferGroup.cpp
@@ -55,7 +55,8 @@
     mLastBuffer = buffer;
 }
 
-status_t MediaBufferGroup::acquire_buffer(MediaBuffer **out) {
+status_t MediaBufferGroup::acquire_buffer(
+        MediaBuffer **out, bool nonBlocking) {
     Mutex::Autolock autoLock(mLock);
 
     for (;;) {
@@ -70,6 +71,11 @@
             }
         }
 
+        if (nonBlocking) {
+            *out = NULL;
+            return WOULD_BLOCK;
+        }
+
         // All buffers are in use. Block until one of them is returned to us.
         mCondition.wait(mLock);
     }
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 7a9cb0b..7c02959 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -16,13 +16,13 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MediaCodec"
-#include <utils/Log.h>
 #include <inttypes.h>
 
-#include <media/stagefright/MediaCodec.h>
-
+#include "include/avc_utils.h"
 #include "include/SoftwareRenderer.h"
 
+#include <binder/IBatteryStats.h>
+#include <binder/IServiceManager.h>
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
 #include <media/stagefright/foundation/ABuffer.h>
@@ -32,16 +32,85 @@
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/ACodec.h>
 #include <media/stagefright/BufferProducerWrapper.h>
+#include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/NativeWindowWrapper.h>
-
-#include "include/avc_utils.h"
+#include <private/android_filesystem_config.h>
+#include <utils/Log.h>
+#include <utils/Singleton.h>
 
 namespace android {
 
+struct MediaCodec::BatteryNotifier : public Singleton<BatteryNotifier> {
+    BatteryNotifier();
+
+    void noteStartVideo();
+    void noteStopVideo();
+    void noteStartAudio();
+    void noteStopAudio();
+
+private:
+    int32_t mVideoRefCount;
+    int32_t mAudioRefCount;
+    sp<IBatteryStats> mBatteryStatService;
+};
+
+ANDROID_SINGLETON_STATIC_INSTANCE(MediaCodec::BatteryNotifier)
+
+MediaCodec::BatteryNotifier::BatteryNotifier() :
+    mVideoRefCount(0),
+    mAudioRefCount(0) {
+    // get battery service
+    const sp<IServiceManager> sm(defaultServiceManager());
+    if (sm != NULL) {
+        const String16 name("batterystats");
+        mBatteryStatService = interface_cast<IBatteryStats>(sm->getService(name));
+        if (mBatteryStatService == NULL) {
+            ALOGE("batterystats service unavailable!");
+        }
+    }
+}
+
+void MediaCodec::BatteryNotifier::noteStartVideo() {
+    if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStartVideo(AID_MEDIA);
+    }
+    mVideoRefCount++;
+}
+
+void MediaCodec::BatteryNotifier::noteStopVideo() {
+    if (mVideoRefCount == 0) {
+        ALOGW("BatteryNotifier::noteStop(): video refcount is broken!");
+        return;
+    }
+
+    mVideoRefCount--;
+    if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStopVideo(AID_MEDIA);
+    }
+}
+
+void MediaCodec::BatteryNotifier::noteStartAudio() {
+    if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStartAudio(AID_MEDIA);
+    }
+    mAudioRefCount++;
+}
+
+void MediaCodec::BatteryNotifier::noteStopAudio() {
+    if (mAudioRefCount == 0) {
+        ALOGW("BatteryNotifier::noteStop(): audio refcount is broken!");
+        return;
+    }
+
+    mAudioRefCount--;
+    if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStopAudio(AID_MEDIA);
+    }
+}
 // static
 sp<MediaCodec> MediaCodec::CreateByType(
         const sp<ALooper> &looper, const char *mime, bool encoder) {
@@ -71,6 +140,8 @@
       mReplyID(0),
       mFlags(0),
       mSoftRenderer(NULL),
+      mBatteryStatNotified(false),
+      mIsVideo(false),
       mDequeueInputTimeoutGeneration(0),
       mDequeueInputReplyID(0),
       mDequeueOutputTimeoutGeneration(0),
@@ -479,6 +550,10 @@
     return getBufferAndFormat(kPortIndexInput, index, buffer, &format);
 }
 
+bool MediaCodec::isExecuting() const {
+    return mState == STARTED || mState == FLUSHED;
+}
+
 status_t MediaCodec::getBufferAndFormat(
         size_t portIndex, size_t index,
         sp<ABuffer> *buffer, sp<AMessage> *format) {
@@ -486,7 +561,7 @@
 
     buffer->clear();
     format->clear();
-    if (mState != STARTED) {
+    if (!isExecuting()) {
         return INVALID_OPERATION;
     }
 
@@ -544,7 +619,7 @@
 }
 
 bool MediaCodec::handleDequeueInputBuffer(uint32_t replyID, bool newRequest) {
-    if (mState != STARTED
+    if (!isExecuting() || (mFlags & kFlagIsAsync)
             || (mFlags & kFlagStickyError)
             || (newRequest && (mFlags & kFlagDequeueInputPending))) {
         PostReplyWithError(replyID, INVALID_OPERATION);
@@ -568,7 +643,7 @@
 bool MediaCodec::handleDequeueOutputBuffer(uint32_t replyID, bool newRequest) {
     sp<AMessage> response = new AMessage;
 
-    if (mState != STARTED
+    if (!isExecuting() || (mFlags & kFlagIsAsync)
             || (mFlags & kFlagStickyError)
             || (newRequest && (mFlags & kFlagDequeueOutputPending))) {
         response->setInt32("err", INVALID_OPERATION);
@@ -689,10 +764,12 @@
 
                         case FLUSHING:
                         {
-                            setState(STARTED);
+                            setState(
+                                    (mFlags & kFlagIsAsync) ? FLUSHED : STARTED);
                             break;
                         }
 
+                        case FLUSHED:
                         case STARTED:
                         {
                             sendErrorReponse = false;
@@ -756,7 +833,6 @@
                 case CodecBase::kWhatComponentConfigured:
                 {
                     CHECK_EQ(mState, CONFIGURING);
-                    setState(CONFIGURED);
 
                     // reset input surface flag
                     mHaveInputSurface = false;
@@ -764,6 +840,7 @@
                     CHECK(msg->findMessage("input-format", &mInputFormat));
                     CHECK(msg->findMessage("output-format", &mOutputFormat));
 
+                    setState(CONFIGURED);
                     (new AMessage)->postReply(mReplyID);
                     break;
                 }
@@ -1034,9 +1111,13 @@
                 case CodecBase::kWhatFlushCompleted:
                 {
                     CHECK_EQ(mState, FLUSHING);
-                    setState(STARTED);
 
-                    mCodec->signalResume();
+                    if (mFlags & kFlagIsAsync) {
+                        setState(FLUSHED);
+                    } else {
+                        setState(STARTED);
+                        mCodec->signalResume();
+                    }
 
                     (new AMessage)->postReply(mReplyID);
                     break;
@@ -1091,8 +1172,8 @@
 
             if (mState == UNINITIALIZED
                     || mState == INITIALIZING
-                    || mState == STARTED) {
-                // callback can't be set after codec is started,
+                    || isExecuting()) {
+                // callback can't be set after codec is executing,
                 // or before it's initialized (as the callback
                 // will be cleared when it goes to INITIALIZED)
                 PostReplyWithError(replyID, INVALID_OPERATION);
@@ -1194,7 +1275,10 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (mState != CONFIGURED) {
+            if (mState == FLUSHED) {
+                mCodec->signalResume();
+                PostReplyWithError(replyID, OK);
+            } else if (mState != CONFIGURED) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
             }
@@ -1216,7 +1300,7 @@
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (mState != INITIALIZED
-                    && mState != CONFIGURED && mState != STARTED) {
+                    && mState != CONFIGURED && !isExecuting()) {
                 // We may be in "UNINITIALIZED" state already without the
                 // client being aware of this if media server died while
                 // we were being stopped. The client would assume that
@@ -1317,7 +1401,7 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (mState != STARTED || (mFlags & kFlagStickyError)) {
+            if (!isExecuting() || (mFlags & kFlagStickyError)) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
             }
@@ -1388,7 +1472,7 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (mState != STARTED || (mFlags & kFlagStickyError)) {
+            if (!isExecuting() || (mFlags & kFlagStickyError)) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
             }
@@ -1404,7 +1488,7 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (mState != STARTED || (mFlags & kFlagStickyError)) {
+            if (!isExecuting() || (mFlags & kFlagStickyError)) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
             }
@@ -1419,7 +1503,8 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (mState != STARTED || (mFlags & kFlagStickyError)) {
+            if (!isExecuting() || (mFlags & kFlagIsAsync)
+                    || (mFlags & kFlagStickyError)) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
             }
@@ -1450,12 +1535,13 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (mState != STARTED || (mFlags & kFlagStickyError)) {
+            if (!isExecuting() || (mFlags & kFlagStickyError)) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
             }
 
             mReplyID = replyID;
+            // TODO: skip flushing if already FLUSHED
             setState(FLUSHING);
 
             mCodec->signalFlush();
@@ -1473,7 +1559,8 @@
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if ((mState != CONFIGURED && mState != STARTING &&
-                 mState != STARTED && mState != FLUSHING)
+                 mState != STARTED && mState != FLUSHING &&
+                 mState != FLUSHED)
                     || (mFlags & kFlagStickyError)
                     || format == NULL) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
@@ -1620,6 +1707,8 @@
     mState = newState;
 
     cancelPendingDequeueOperations();
+
+    updateBatteryStat();
 }
 
 void MediaCodec::returnBuffersToCodec() {
@@ -1806,7 +1895,7 @@
         render = 0;
     }
 
-    if (mState != STARTED) {
+    if (!isExecuting()) {
         return -EINVAL;
     }
 
@@ -1875,6 +1964,18 @@
         Mutex::Autolock al(mBufferLock);
         info->mFormat = portIndex == kPortIndexInput ? mInputFormat : mOutputFormat;
         info->mOwnedByClient = true;
+
+        // set image-data
+        if (info->mFormat != NULL) {
+            sp<ABuffer> imageData;
+            if (info->mFormat->findBuffer("image-data", &imageData)) {
+                info->mData->meta()->setBuffer("image-data", imageData);
+            }
+            int32_t left, top, right, bottom;
+            if (info->mFormat->findRect("crop", &left, &top, &right, &bottom)) {
+                info->mData->meta()->setRect("crop-rect", left, top, right, bottom);
+            }
+        }
     }
 
     return index;
@@ -2054,4 +2155,34 @@
     return OK;
 }
 
+void MediaCodec::updateBatteryStat() {
+    if (mState == CONFIGURED && !mBatteryStatNotified) {
+        AString mime;
+        CHECK(mOutputFormat != NULL &&
+                mOutputFormat->findString("mime", &mime));
+
+        mIsVideo = mime.startsWithIgnoreCase("video/");
+
+        BatteryNotifier& notifier(BatteryNotifier::getInstance());
+
+        if (mIsVideo) {
+            notifier.noteStartVideo();
+        } else {
+            notifier.noteStartAudio();
+        }
+
+        mBatteryStatNotified = true;
+    } else if (mState == UNINITIALIZED && mBatteryStatNotified) {
+        BatteryNotifier& notifier(BatteryNotifier::getInstance());
+
+        if (mIsVideo) {
+            notifier.noteStopVideo();
+        } else {
+            notifier.noteStopAudio();
+        }
+
+        mBatteryStatNotified = false;
+    }
+}
+
 }  // namespace android
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index cd51582..d021533 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -21,6 +21,7 @@
 #include <media/stagefright/MediaCodecList.h>
 
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/OMXCodec.h>
@@ -79,6 +80,19 @@
                   info->mName.c_str());
 
             mCodecInfos.removeAt(i);
+#if LOG_NDEBUG == 0
+        } else {
+            for (size_t type_ix = 0; type_ix < mTypes.size(); ++type_ix) {
+                uint32_t typeMask = 1ul << mTypes.valueAt(type_ix);
+                if (info->mTypes & typeMask) {
+                    AString mime = mTypes.keyAt(type_ix);
+                    uint32_t bit = mTypes.valueAt(type_ix);
+
+                    ALOGV("%s codec info for %s: %s", info->mName.c_str(), mime.c_str(),
+                            info->mCaps.editValueFor(bit)->debugString().c_str());
+                }
+            }
+#endif
         }
     }
 
@@ -217,6 +231,8 @@
         return;
     }
 
+    bool inType = true;
+
     if (!strcmp(name, "Include")) {
         mInitCheck = includeXMLFile(attrs);
         if (mInitCheck == OK) {
@@ -267,6 +283,26 @@
                 mInitCheck = addQuirk(attrs);
             } else if (!strcmp(name, "Type")) {
                 mInitCheck = addTypeFromAttributes(attrs);
+                mCurrentSection =
+                    (mCurrentSection == SECTION_DECODER
+                            ? SECTION_DECODER_TYPE : SECTION_ENCODER_TYPE);
+            }
+        }
+        inType = false;
+        // fall through
+
+        case SECTION_DECODER_TYPE:
+        case SECTION_ENCODER_TYPE:
+        {
+            CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+            // ignore limits and features specified outside of type
+            bool outside = !inType && info->mSoleType == 0;
+            if (outside && (!strcmp(name, "Limit") || !strcmp(name, "Feature"))) {
+                ALOGW("ignoring %s specified outside of a Type", name);
+            } else if (!strcmp(name, "Limit")) {
+                mInitCheck = addLimit(attrs);
+            } else if (!strcmp(name, "Feature")) {
+                mInitCheck = addFeature(attrs);
             }
             break;
         }
@@ -300,10 +336,27 @@
             break;
         }
 
+        case SECTION_DECODER_TYPE:
+        case SECTION_ENCODER_TYPE:
+        {
+            if (!strcmp(name, "Type")) {
+                mCurrentSection =
+                    (mCurrentSection == SECTION_DECODER_TYPE
+                            ? SECTION_DECODER : SECTION_ENCODER);
+
+                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+                info->mCurrentCaps = NULL;
+            }
+            break;
+        }
+
         case SECTION_DECODER:
         {
             if (!strcmp(name, "MediaCodec")) {
                 mCurrentSection = SECTION_DECODERS;
+
+                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+                info->mCurrentCaps = NULL;
             }
             break;
         }
@@ -312,6 +365,9 @@
         {
             if (!strcmp(name, "MediaCodec")) {
                 mCurrentSection = SECTION_ENCODERS;
+
+                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+                info->mCurrentCaps = NULL;
             }
             break;
         }
@@ -373,11 +429,16 @@
     CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
     info->mName = name;
     info->mIsEncoder = encoder;
+    info->mSoleType = 0;
     info->mTypes = 0;
     info->mQuirks = 0;
+    info->mCurrentCaps = NULL;
 
     if (type != NULL) {
         addType(type);
+        // if type was specified in attributes, we do not allow
+        // subsequent types
+        info->mSoleType = info->mTypes;
     }
 }
 
@@ -427,6 +488,12 @@
 status_t MediaCodecList::addTypeFromAttributes(const char **attrs) {
     const char *name = NULL;
 
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+    if (info->mSoleType != 0) {
+        ALOGE("Codec '%s' already had its type specified", info->mName.c_str());
+        return -EINVAL;
+    }
+
     size_t i = 0;
     while (attrs[i] != NULL) {
         if (!strcmp(attrs[i], "name")) {
@@ -469,6 +536,11 @@
 
     CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
     info->mTypes |= 1ul << bit;
+    if (info->mCaps.indexOfKey(bit) < 0) {
+        AMessage *msg = new AMessage();
+        info->mCaps.add(bit, msg);
+    }
+    info->mCurrentCaps = info->mCaps.editValueFor(bit);
 }
 
 ssize_t MediaCodecList::findCodecByType(
@@ -494,6 +566,216 @@
     return -ENOENT;
 }
 
+static status_t limitFoundMissingAttr(AString name, const char *attr, bool found = true) {
+    ALOGE("limit '%s' with %s'%s' attribute", name.c_str(),
+            (found ? "" : "no "), attr);
+    return -EINVAL;
+}
+
+static status_t limitError(AString name, const char *msg) {
+    ALOGE("limit '%s' %s", name.c_str(), msg);
+    return -EINVAL;
+}
+
+static status_t limitInvalidAttr(AString name, const char *attr, AString value) {
+    ALOGE("limit '%s' with invalid '%s' attribute (%s)", name.c_str(),
+            attr, value.c_str());
+    return -EINVAL;
+}
+
+status_t MediaCodecList::addLimit(const char **attrs) {
+    sp<AMessage> msg = new AMessage();
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (attrs[i + 1] == NULL) {
+            return -EINVAL;
+        }
+
+        // attributes with values
+        if (!strcmp(attrs[i], "name")
+                || !strcmp(attrs[i], "default")
+                || !strcmp(attrs[i], "in")
+                || !strcmp(attrs[i], "max")
+                || !strcmp(attrs[i], "min")
+                || !strcmp(attrs[i], "range")
+                || !strcmp(attrs[i], "ranges")
+                || !strcmp(attrs[i], "scale")
+                || !strcmp(attrs[i], "value")) {
+            msg->setString(attrs[i], attrs[i + 1]);
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+        ++i;
+    }
+
+    AString name;
+    if (!msg->findString("name", &name)) {
+        ALOGE("limit with no 'name' attribute");
+        return -EINVAL;
+    }
+
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+
+    // size, blocks, bitrate, frame-rate, blocks-per-second, aspect-ratio: range
+    // quality: range + default + [scale]
+    // complexity: range + default
+    bool found;
+    if (name == "aspect-ratio" || name == "bitrate" || name == "block-count"
+            || name == "blocks-per-second" || name == "complexity"
+            || name == "frame-rate" || name == "quality" || name == "size") {
+        AString min, max;
+        if (msg->findString("min", &min) && msg->findString("max", &max)) {
+            min.append("-");
+            min.append(max);
+            if (msg->contains("range") || msg->contains("value")) {
+                return limitError(name, "has 'min' and 'max' as well as 'range' or "
+                        "'value' attributes");
+            }
+            msg->setString("range", min);
+        } else if (msg->contains("min") || msg->contains("max")) {
+            return limitError(name, "has only 'min' or 'max' attribute");
+        } else if (msg->findString("value", &max)) {
+            min = max;
+            min.append("-");
+            min.append(max);
+            if (msg->contains("range")) {
+                return limitError(name, "has both 'range' and 'value' attributes");
+            }
+            msg->setString("range", min);
+        }
+
+        AString range, scale = "linear", def, in_;
+        if (!msg->findString("range", &range)) {
+            return limitError(name, "with no 'range', 'value' or 'min'/'max' attributes");
+        }
+
+        if ((name == "quality" || name == "complexity") ^
+                (found = msg->findString("default", &def))) {
+            return limitFoundMissingAttr(name, "default", found);
+        }
+        if (name != "quality" && msg->findString("scale", &scale)) {
+            return limitFoundMissingAttr(name, "scale");
+        }
+        if ((name == "aspect-ratio") ^ (found = msg->findString("in", &in_))) {
+            return limitFoundMissingAttr(name, "in", found);
+        }
+
+        if (name == "aspect-ratio") {
+            if (!(in_ == "pixels") && !(in_ == "blocks")) {
+                return limitInvalidAttr(name, "in", in_);
+            }
+            in_.erase(5, 1); // (pixel|block)-aspect-ratio
+            in_.append("-");
+            in_.append(name);
+            name = in_;
+        }
+        if (name == "quality") {
+            info->mCurrentCaps->setString("quality-scale", scale);
+        }
+        if (name == "quality" || name == "complexity") {
+            AString tag = name;
+            tag.append("-default");
+            info->mCurrentCaps->setString(tag.c_str(), def);
+        }
+        AString tag = name;
+        tag.append("-range");
+        info->mCurrentCaps->setString(tag.c_str(), range);
+    } else {
+        AString max, value, ranges;
+        if (msg->contains("default")) {
+            return limitFoundMissingAttr(name, "default");
+        } else if (msg->contains("in")) {
+            return limitFoundMissingAttr(name, "in");
+        } else if ((name == "channel-count") ^
+                (found = msg->findString("max", &max))) {
+            return limitFoundMissingAttr(name, "max", found);
+        } else if (msg->contains("min")) {
+            return limitFoundMissingAttr(name, "min");
+        } else if (msg->contains("range")) {
+            return limitFoundMissingAttr(name, "range");
+        } else if ((name == "sample-rate") ^
+                (found = msg->findString("ranges", &ranges))) {
+            return limitFoundMissingAttr(name, "ranges", found);
+        } else if (msg->contains("scale")) {
+            return limitFoundMissingAttr(name, "scale");
+        } else if ((name == "alignment" || name == "block-size") ^
+                (found = msg->findString("value", &value))) {
+            return limitFoundMissingAttr(name, "value", found);
+        }
+
+        if (max.size()) {
+            AString tag = "max-";
+            tag.append(name);
+            info->mCurrentCaps->setString(tag.c_str(), max);
+        } else if (value.size()) {
+            info->mCurrentCaps->setString(name.c_str(), value);
+        } else if (ranges.size()) {
+            AString tag = name;
+            tag.append("-ranges");
+            info->mCurrentCaps->setString(tag.c_str(), ranges);
+        } else {
+            ALOGW("Ignoring unrecognized limit '%s'", name.c_str());
+        }
+    }
+    return OK;
+}
+
+static bool parseBoolean(const char *s) {
+    if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
+        return true;
+    }
+    char *end;
+    unsigned long res = strtoul(s, &end, 10);
+    return *s != '\0' && *end == '\0' && res > 0;
+}
+
+status_t MediaCodecList::addFeature(const char **attrs) {
+    size_t i = 0;
+    const char *name = NULL;
+    int32_t optional = -1;
+    int32_t required = -1;
+
+    while (attrs[i] != NULL) {
+        if (attrs[i + 1] == NULL) {
+            return -EINVAL;
+        }
+
+        // attributes with values
+        if (!strcmp(attrs[i], "name")) {
+            name = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "optional") || !strcmp(attrs[i], "required")) {
+            int value = (int)parseBoolean(attrs[i + 1]);
+            if (!strcmp(attrs[i], "optional")) {
+                optional = value;
+            } else {
+                required = value;
+            }
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+        ++i;
+    }
+    if (name == NULL) {
+        ALOGE("feature with no 'name' attribute");
+        return -EINVAL;
+    }
+
+    if (optional == required && optional != -1) {
+        ALOGE("feature '%s' is both/neither optional and required", name);
+        return -EINVAL;
+    }
+
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+    AString tag = "feature-";
+    tag.append(name);
+    info->mCurrentCaps->setInt32(tag.c_str(), (required == 1) || (optional == 0));
+    return OK;
+}
+
 ssize_t MediaCodecList::findCodecByName(const char *name) const {
     for (size_t i = 0; i < mCodecInfos.size(); ++i) {
         const CodecInfo &info = mCodecInfos.itemAt(i);
@@ -521,7 +803,7 @@
 
 bool MediaCodecList::isEncoder(size_t index) const {
     if (index >= mCodecInfos.size()) {
-        return NULL;
+        return false;
     }
 
     const CodecInfo &info = mCodecInfos.itemAt(index);
@@ -531,7 +813,7 @@
 bool MediaCodecList::codecHasQuirk(
         size_t index, const char *quirkName) const {
     if (index >= mCodecInfos.size()) {
-        return NULL;
+        return false;
     }
 
     const CodecInfo &info = mCodecInfos.itemAt(index);
@@ -571,7 +853,8 @@
         size_t index, const char *type,
         Vector<ProfileLevel> *profileLevels,
         Vector<uint32_t> *colorFormats,
-        uint32_t *flags) const {
+        uint32_t *flags,
+        sp<AMessage> *capabilities) const {
     profileLevels->clear();
     colorFormats->clear();
 
@@ -581,6 +864,13 @@
 
     const CodecInfo &info = mCodecInfos.itemAt(index);
 
+    ssize_t typeIndex = mTypes.indexOfKey(type);
+    if (typeIndex < 0) {
+        return -EINVAL;
+    }
+    // essentially doing valueFor without the CHECK abort
+    typeIndex = mTypes.valueAt(typeIndex);
+
     OMXClient client;
     status_t err = client.connect();
     if (err != OK) {
@@ -611,6 +901,11 @@
 
     *flags = caps.mFlags;
 
+    // TODO this check will be removed once JNI side is merged
+    if (capabilities != NULL) {
+        *capabilities = info.mCaps.valueFor(typeIndex);
+    }
+
     return OK;
 }
 
diff --git a/media/libstagefright/MediaSource.cpp b/media/libstagefright/MediaSource.cpp
index fd0e79c..576471a 100644
--- a/media/libstagefright/MediaSource.cpp
+++ b/media/libstagefright/MediaSource.cpp
@@ -32,6 +32,19 @@
     mOptions = 0;
     mSeekTimeUs = 0;
     mLatenessUs = 0;
+    mNonBlocking = false;
+}
+
+void MediaSource::ReadOptions::setNonBlocking() {
+    mNonBlocking = true;
+}
+
+void MediaSource::ReadOptions::clearNonBlocking() {
+    mNonBlocking = false;
+}
+
+bool MediaSource::ReadOptions::getNonBlocking() const {
+    return mNonBlocking;
 }
 
 void MediaSource::ReadOptions::setSeekTo(int64_t time_us, SeekMode mode) {
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index 8c15929..821bd81 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -320,22 +320,26 @@
     }
 
     size_t left = 0;
-    size_t right = mTableOfContents.size();
-    while (left < right) {
-        size_t center = left / 2 + right / 2 + (left & right & 1);
+    size_t right_plus_one = mTableOfContents.size();
+    while (left < right_plus_one) {
+        size_t center = left + (right_plus_one - left) / 2;
 
         const TOCEntry &entry = mTableOfContents.itemAt(center);
 
         if (timeUs < entry.mTimeUs) {
-            right = center;
+            right_plus_one = center;
         } else if (timeUs > entry.mTimeUs) {
             left = center + 1;
         } else {
-            left = right = center;
+            left = center;
             break;
         }
     }
 
+    if (left == mTableOfContents.size()) {
+        --left;
+    }
+
     const TOCEntry &entry = mTableOfContents.itemAt(left);
 
     ALOGV("seeking to entry %zu / %zu at offset %lld",
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 9a92805..bdd6d56 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -330,6 +330,10 @@
     }
 
     mTimeToSampleCount = U32_AT(&header[4]);
+    uint64_t allocSize = (uint64_t)mTimeToSampleCount * 2 * sizeof(uint32_t);
+    if (allocSize > UINT32_MAX) {
+        return ERROR_OUT_OF_RANGE;
+    }
     mTimeToSample = new uint32_t[mTimeToSampleCount * 2];
 
     size_t size = sizeof(uint32_t) * mTimeToSampleCount * 2;
@@ -372,6 +376,11 @@
     }
 
     mNumCompositionTimeDeltaEntries = numEntries;
+    uint64_t allocSize = (uint64_t)numEntries * 2 * sizeof(uint32_t);
+    if (allocSize > UINT32_MAX) {
+        return ERROR_OUT_OF_RANGE;
+    }
+
     mCompositionTimeDeltaEntries = new uint32_t[2 * numEntries];
 
     if (mDataSource->readAt(
@@ -417,6 +426,11 @@
         ALOGV("Table of sync samples is empty or has only a single entry!");
     }
 
+    uint64_t allocSize = (uint64_t)mNumSyncSamples * sizeof(uint32_t);
+    if (allocSize > UINT32_MAX) {
+        return ERROR_OUT_OF_RANGE;
+    }
+
     mSyncSamples = new uint32_t[mNumSyncSamples];
     size_t size = mNumSyncSamples * sizeof(uint32_t);
     if (mDataSource->readAt(mSyncSampleOffset + 8, mSyncSamples, size)
@@ -520,83 +534,72 @@
 }
 
 status_t SampleTable::findSampleAtTime(
-        uint32_t req_time, uint32_t *sample_index, uint32_t flags) {
+        uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
+        uint32_t *sample_index, uint32_t flags) {
     buildSampleEntriesTable();
 
     uint32_t left = 0;
-    uint32_t right = mNumSampleSizes;
-    while (left < right) {
-        uint32_t center = (left + right) / 2;
-        uint32_t centerTime = mSampleTimeEntries[center].mCompositionTime;
+    uint32_t right_plus_one = mNumSampleSizes;
+    while (left < right_plus_one) {
+        uint32_t center = left + (right_plus_one - left) / 2;
+        uint64_t centerTime =
+            getSampleTime(center, scale_num, scale_den);
 
         if (req_time < centerTime) {
-            right = center;
+            right_plus_one = center;
         } else if (req_time > centerTime) {
             left = center + 1;
         } else {
-            left = center;
-            break;
+            *sample_index = mSampleTimeEntries[center].mSampleIndex;
+            return OK;
         }
     }
 
-    if (left == mNumSampleSizes) {
-        if (flags == kFlagAfter) {
-            return ERROR_OUT_OF_RANGE;
-        }
-
-        --left;
-    }
-
     uint32_t closestIndex = left;
 
+    if (closestIndex == mNumSampleSizes) {
+        if (flags == kFlagAfter) {
+            return ERROR_OUT_OF_RANGE;
+        }
+        flags = kFlagBefore;
+    } else if (closestIndex == 0) {
+        if (flags == kFlagBefore) {
+            // normally we should return out of range, but that is
+            // treated as end-of-stream.  instead return first sample
+            //
+            // return ERROR_OUT_OF_RANGE;
+        }
+        flags = kFlagAfter;
+    }
+
     switch (flags) {
         case kFlagBefore:
         {
-            while (closestIndex > 0
-                    && mSampleTimeEntries[closestIndex].mCompositionTime
-                            > req_time) {
-                --closestIndex;
-            }
+            --closestIndex;
             break;
         }
 
         case kFlagAfter:
         {
-            while (closestIndex + 1 < mNumSampleSizes
-                    && mSampleTimeEntries[closestIndex].mCompositionTime
-                            < req_time) {
-                ++closestIndex;
-            }
+            // nothing to do
             break;
         }
 
         default:
         {
             CHECK(flags == kFlagClosest);
-
-            if (closestIndex > 0) {
-                // Check left neighbour and pick closest.
-                uint32_t absdiff1 =
-                    abs_difference(
-                            mSampleTimeEntries[closestIndex].mCompositionTime,
-                            req_time);
-
-                uint32_t absdiff2 =
-                    abs_difference(
-                            mSampleTimeEntries[closestIndex - 1].mCompositionTime,
-                            req_time);
-
-                if (absdiff1 > absdiff2) {
-                    closestIndex = closestIndex - 1;
-                }
+            // pick closest based on timestamp. use abs_difference for safety
+            if (abs_difference(
+                    getSampleTime(closestIndex, scale_num, scale_den), req_time) >
+                abs_difference(
+                    req_time, getSampleTime(closestIndex - 1, scale_num, scale_den))) {
+                --closestIndex;
             }
-
             break;
         }
     }
 
     *sample_index = mSampleTimeEntries[closestIndex].mSampleIndex;
-
     return OK;
 }
 
@@ -618,109 +621,85 @@
     }
 
     uint32_t left = 0;
-    uint32_t right = mNumSyncSamples;
-    while (left < right) {
-        uint32_t center = left + (right - left) / 2;
+    uint32_t right_plus_one = mNumSyncSamples;
+    while (left < right_plus_one) {
+        uint32_t center = left + (right_plus_one - left) / 2;
         uint32_t x = mSyncSamples[center];
 
         if (start_sample_index < x) {
-            right = center;
+            right_plus_one = center;
         } else if (start_sample_index > x) {
             left = center + 1;
         } else {
-            left = center;
-            break;
+            *sample_index = x;
+            return OK;
         }
     }
+
     if (left == mNumSyncSamples) {
         if (flags == kFlagAfter) {
             ALOGE("tried to find a sync frame after the last one: %d", left);
             return ERROR_OUT_OF_RANGE;
         }
-        left = left - 1;
+        flags = kFlagBefore;
+    }
+    else if (left == 0) {
+        if (flags == kFlagBefore) {
+            ALOGE("tried to find a sync frame before the first one: %u", left);
+
+            // normally we should return out of range, but that is
+            // treated as end-of-stream.  instead seek to first sync
+            //
+            // return ERROR_OUT_OF_RANGE;
+        }
+        flags = kFlagAfter;
     }
 
-    // Now ssi[left] is the sync sample index just before (or at)
-    // start_sample_index.
-    // Also start_sample_index < ssi[left + 1], if left + 1 < mNumSyncSamples.
-
-    uint32_t x = mSyncSamples[left];
-
-    if (left + 1 < mNumSyncSamples) {
-        uint32_t y = mSyncSamples[left + 1];
-
-        // our sample lies between sync samples x and y.
-
-        status_t err = mSampleIterator->seekTo(start_sample_index);
-        if (err != OK) {
-            return err;
-        }
-
-        uint32_t sample_time = mSampleIterator->getSampleTime();
-
-        err = mSampleIterator->seekTo(x);
-        if (err != OK) {
-            return err;
-        }
-        uint32_t x_time = mSampleIterator->getSampleTime();
-
-        err = mSampleIterator->seekTo(y);
-        if (err != OK) {
-            return err;
-        }
-
-        uint32_t y_time = mSampleIterator->getSampleTime();
-
-        if (abs_difference(x_time, sample_time)
-                > abs_difference(y_time, sample_time)) {
-            // Pick the sync sample closest (timewise) to the start-sample.
-            x = y;
-            ++left;
-        }
-    }
-
+    // Now ssi[left - 1] <(=) start_sample_index <= ssi[left]
     switch (flags) {
         case kFlagBefore:
         {
-            if (x > start_sample_index) {
-                CHECK(left > 0);
-
-                x = mSyncSamples[left - 1];
-
-                if (x > start_sample_index) {
-                    // The table of sync sample indices was not sorted
-                    // properly.
-                    return ERROR_MALFORMED;
-                }
-            }
+            --left;
             break;
         }
-
         case kFlagAfter:
         {
-            if (x < start_sample_index) {
-                if (left + 1 >= mNumSyncSamples) {
-                    return ERROR_OUT_OF_RANGE;
-                }
-
-                x = mSyncSamples[left + 1];
-
-                if (x < start_sample_index) {
-                    // The table of sync sample indices was not sorted
-                    // properly.
-                    return ERROR_MALFORMED;
-                }
-            }
-
+            // nothing to do
             break;
         }
-
         default:
+        {
+            // this route is not used, but implement it nonetheless
+            CHECK(flags == kFlagClosest);
+
+            status_t err = mSampleIterator->seekTo(start_sample_index);
+            if (err != OK) {
+                return err;
+            }
+            uint32_t sample_time = mSampleIterator->getSampleTime();
+
+            err = mSampleIterator->seekTo(mSyncSamples[left]);
+            if (err != OK) {
+                return err;
+            }
+            uint32_t upper_time = mSampleIterator->getSampleTime();
+
+            err = mSampleIterator->seekTo(mSyncSamples[left - 1]);
+            if (err != OK) {
+                return err;
+            }
+            uint32_t lower_time = mSampleIterator->getSampleTime();
+
+            // use abs_difference for safety
+            if (abs_difference(upper_time, sample_time) >
+                abs_difference(sample_time, lower_time)) {
+                --left;
+            }
             break;
+        }
     }
 
-    *sample_index = x;
-
+    *sample_index = mSyncSamples[left];
     return OK;
 }
 
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 750bff0..587e264 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -142,6 +142,11 @@
         msg->setInt32("max-input-size", maxInputSize);
     }
 
+    int32_t rotationDegrees;
+    if (meta->findInt32(kKeyRotation, &rotationDegrees)) {
+        msg->setInt32("rotation-degrees", rotationDegrees);
+    }
+
     uint32_t type;
     const void *data;
     size_t size;
diff --git a/media/libstagefright/foundation/ABuffer.cpp b/media/libstagefright/foundation/ABuffer.cpp
index 6173db4..c93c7e8 100644
--- a/media/libstagefright/foundation/ABuffer.cpp
+++ b/media/libstagefright/foundation/ABuffer.cpp
@@ -40,6 +40,14 @@
       mOwnsData(false) {
 }
 
+// static
+sp<ABuffer> ABuffer::CreateAsCopy(const void *data, size_t capacity)
+{
+    sp<ABuffer> res = new ABuffer(capacity);
+    memcpy(res->data(), data, capacity);
+    return res;
+}
+
 ABuffer::~ABuffer() {
     if (mOwnsData) {
         if (mData != NULL) {
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index dc42f91..d268aa4 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -127,6 +127,20 @@
     return NULL;
 }
 
+bool AMessage::contains(const char *name) const {
+    name = AAtomizer::Atomize(name);
+
+    for (size_t i = 0; i < mNumItems; ++i) {
+        const Item *item = &mItems[i];
+
+        if (item->mName == name) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
 #define BASIC_TYPE(NAME,FIELDNAME,TYPENAME)                             \
 void AMessage::set##NAME(const char *name, TYPENAME value) {            \
     Item *item = allocateItem(name);                                    \
@@ -160,6 +174,11 @@
     item->u.stringValue = new AString(s, len < 0 ? strlen(s) : len);
 }
 
+void AMessage::setString(
+        const char *name, const AString &s) {
+    setString(name, s.c_str(), s.size());
+}
+
 void AMessage::setObjectInternal(
         const char *name, const sp<RefBase> &obj, Type type) {
     Item *item = allocateItem(name);
diff --git a/media/libstagefright/include/SampleTable.h b/media/libstagefright/include/SampleTable.h
index fe146f2..d06df7b 100644
--- a/media/libstagefright/include/SampleTable.h
+++ b/media/libstagefright/include/SampleTable.h
@@ -75,7 +75,8 @@
         kFlagClosest
     };
     status_t findSampleAtTime(
-            uint32_t req_time, uint32_t *sample_index, uint32_t flags);
+            uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
+            uint32_t *sample_index, uint32_t flags);
 
     status_t findSyncSampleNear(
             uint32_t start_sample_index, uint32_t *sample_index,
@@ -138,6 +139,13 @@
 
     friend struct SampleIterator;
 
+    // normally we don't round
+    inline uint64_t getSampleTime(
+            size_t sample_index, uint64_t scale_num, uint64_t scale_den) const {
+        return (mSampleTimeEntries[sample_index].mCompositionTime
+            * scale_num) / scale_den;
+    }
+
     status_t getSampleSize_l(uint32_t sample_index, size_t *sample_size);
     uint32_t getCompositionTimeOffset(uint32_t sampleIndex);
 
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 3d241e0..eda6387 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -503,11 +503,7 @@
                     ElementaryStreamQueue::MPEG4_VIDEO);
             break;
 
-        case STREAMTYPE_PCM_AUDIO:
-            mQueue = new ElementaryStreamQueue(
-                    ElementaryStreamQueue::PCM_AUDIO);
-            break;
-
+        case STREAMTYPE_LPCM_AC3:
         case STREAMTYPE_AC3:
             mQueue = new ElementaryStreamQueue(
                     ElementaryStreamQueue::AC3);
@@ -622,7 +618,7 @@
         case STREAMTYPE_MPEG1_AUDIO:
         case STREAMTYPE_MPEG2_AUDIO:
         case STREAMTYPE_MPEG2_AUDIO_ADTS:
-        case STREAMTYPE_PCM_AUDIO:
+        case STREAMTYPE_LPCM_AC3:
         case STREAMTYPE_AC3:
             return true;
 
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 86b025f..8986a22 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -93,7 +93,9 @@
         // From ATSC A/53 Part 3:2009, 6.7.1
         STREAMTYPE_AC3                  = 0x81,
 
-        STREAMTYPE_PCM_AUDIO            = 0x83,
+        // Stream type 0x83 is non-standard,
+        // it could be LPCM or TrueHD AC3
+        STREAMTYPE_LPCM_AC3             = 0x83,
     };
 
 protected:
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 871824a..72c9dae 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -34,6 +34,7 @@
 
 AnotherPacketSource::AnotherPacketSource(const sp<MetaData> &meta)
     : mIsAudio(false),
+      mIsVideo(false),
       mFormat(NULL),
       mLastQueuedTimeUs(0),
       mEOSResult(OK),
@@ -45,6 +46,7 @@
     CHECK(mFormat == NULL);
 
     mIsAudio = false;
+    mIsVideo = false;
 
     if (meta == NULL) {
         return;
@@ -56,8 +58,10 @@
 
     if (!strncasecmp("audio/", mime, 6)) {
         mIsAudio = true;
+    } else if (!strncasecmp("video/", mime, 6)) {
+        mIsVideo = true;
     } else {
-        CHECK(!strncasecmp("video/", mime, 6));
+        CHECK(!strncasecmp("text/", mime, 5));
     }
 }
 
@@ -175,7 +179,11 @@
         return (discontinuityType & ATSParser::DISCONTINUITY_AUDIO_FORMAT) != 0;
     }
 
-    return (discontinuityType & ATSParser::DISCONTINUITY_VIDEO_FORMAT) != 0;
+    if (mIsVideo) {
+        return (discontinuityType & ATSParser::DISCONTINUITY_VIDEO_FORMAT) != 0;
+    }
+
+    return false;
 }
 
 void AnotherPacketSource::queueAccessUnit(const sp<ABuffer> &buffer) {
@@ -230,6 +238,11 @@
             int32_t oldDiscontinuityType;
             if (!oldBuffer->meta()->findInt32(
                         "discontinuity", &oldDiscontinuityType)) {
+                MediaBuffer *mbuf = NULL;
+                oldBuffer->meta()->findPointer("mediaBuffer", (void**)&mbuf);
+                if (mbuf != NULL) {
+                    mbuf->release();
+                }
                 it = mBuffers.erase(it);
                 continue;
             }
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index 06c49bd..f38f9dc 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -74,6 +74,7 @@
     Condition mCondition;
 
     bool mIsAudio;
+    bool mIsVideo;
     sp<MetaData> mFormat;
     int64_t mLastQueuedTimeUs;
     List<sp<ABuffer> > mBuffers;
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 22b12d9..cc4770a 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -233,7 +233,7 @@
             instance, &handle);
 
     if (err != OMX_ErrorNone) {
-        ALOGV("FAILED to allocate omx component '%s'", name);
+        ALOGE("FAILED to allocate omx component '%s'", name);
 
         instance->onGetHandleFailed();
 
diff --git a/media/libstagefright/webm/WebmWriter.h b/media/libstagefright/webm/WebmWriter.h
index 529dec8..36b6965 100644
--- a/media/libstagefright/webm/WebmWriter.h
+++ b/media/libstagefright/webm/WebmWriter.h
@@ -41,14 +41,14 @@
     ~WebmWriter() { reset(); }
 
 
-    status_t addSource(const sp<MediaSource> &source);
-    status_t start(MetaData *param = NULL);
-    status_t stop();
-    status_t pause();
-    bool reachedEOS();
+    virtual status_t addSource(const sp<MediaSource> &source);
+    virtual status_t start(MetaData *param = NULL);
+    virtual status_t stop();
+    virtual status_t pause();
+    virtual bool reachedEOS();
 
-    void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
-    int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
+    virtual void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
+    virtual int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
 
 private:
     int mFd;
diff --git a/media/mtp/Android.mk b/media/mtp/Android.mk
index ac608a1..3af0956 100644
--- a/media/mtp/Android.mk
+++ b/media/mtp/Android.mk
@@ -39,9 +39,6 @@
 
 LOCAL_CFLAGS := -DMTP_DEVICE -DMTP_HOST
 
-# Needed for <bionic_time.h>
-LOCAL_C_INCLUDES := bionic/libc/private
-
 LOCAL_SHARED_LIBRARIES := libutils libcutils liblog libusbhost libbinder
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/mtp/MtpUtils.cpp b/media/mtp/MtpUtils.cpp
index 6ec8876..0667bdd 100644
--- a/media/mtp/MtpUtils.cpp
+++ b/media/mtp/MtpUtils.cpp
@@ -19,7 +19,8 @@
 #include <stdio.h>
 #include <time.h>
 
-#include <cutils/tztime.h>
+#include <../private/bionic_time.h> /* TODO: switch this code to icu4c! */
+
 #include "MtpUtils.h"
 
 namespace android {
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 5fd7ce8..bd7121e 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1097,7 +1097,7 @@
 
     AutoMutex lock(mHardwareLock);
     mHardwareStatus = AUDIO_HW_GET_INPUT_BUFFER_SIZE;
-    struct audio_config config;
+    audio_config_t config;
     memset(&config, 0, sizeof(config));
     config.sample_rate = sampleRate;
     config.channel_mask = channelMask;
@@ -1531,7 +1531,7 @@
     }
 
     audio_module_handle_t handle = nextUniqueId();
-    mAudioHwDevs.add(handle, new AudioHwDevice(name, dev, flags));
+    mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
 
     ALOGI("loadHwModule() Loaded %s audio interface from %s (%s) handle %d",
           name, dev->common.module->name, dev->common.module->id, handle);
@@ -1575,110 +1575,114 @@
 
 // ----------------------------------------------------------------------------
 
-audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
-                                           audio_devices_t *pDevices,
-                                           uint32_t *pSamplingRate,
-                                           audio_format_t *pFormat,
-                                           audio_channel_mask_t *pChannelMask,
-                                           uint32_t *pLatencyMs,
-                                           audio_output_flags_t flags,
-                                           const audio_offload_info_t *offloadInfo)
+
+sp<AudioFlinger::PlaybackThread> AudioFlinger::openOutput_l(audio_module_handle_t module,
+                                                            audio_io_handle_t *output,
+                                                            audio_config_t *config,
+                                                            audio_devices_t devices,
+                                                            const String8& address,
+                                                            audio_output_flags_t flags)
 {
-    struct audio_config config;
-    memset(&config, 0, sizeof(config));
-    config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
-    config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
-    config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
-    if (offloadInfo != NULL) {
-        config.offload_info = *offloadInfo;
-    }
-
-    ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
-              module,
-              (pDevices != NULL) ? *pDevices : 0,
-              config.sample_rate,
-              config.format,
-              config.channel_mask,
-              flags);
-    ALOGV("openOutput(), offloadInfo %p version 0x%04x",
-          offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version);
-
-    if (pDevices == NULL || *pDevices == AUDIO_DEVICE_NONE) {
-        return AUDIO_IO_HANDLE_NONE;
-    }
-
-    Mutex::Autolock _l(mLock);
-
-    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, *pDevices);
+    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);
     if (outHwDev == NULL) {
-        return AUDIO_IO_HANDLE_NONE;
+        return 0;
     }
 
     audio_hw_device_t *hwDevHal = outHwDev->hwDevice();
-    audio_io_handle_t id = nextUniqueId();
+    if (*output == AUDIO_IO_HANDLE_NONE) {
+        *output = nextUniqueId();
+    }
 
     mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
 
     audio_stream_out_t *outStream = NULL;
 
     // FOR TESTING ONLY:
-    // Enable increased sink precision for mixing mode if kEnableExtendedPrecision is true.
-    if (kEnableExtendedPrecision &&  // Check only for Normal Mixing mode
-            !(flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT))) {
-        // Update format
-        //config.format = AUDIO_FORMAT_PCM_FLOAT;
-        //config.format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
-        //config.format = AUDIO_FORMAT_PCM_32_BIT;
-        //config.format = AUDIO_FORMAT_PCM_8_24_BIT;
-        // ALOGV("openOutput() upgrading format to %#08x", config.format);
+    // This if statement allows overriding the audio policy settings
+    // and forcing a specific format or channel mask to the HAL/Sink device for testing.
+    if (!(flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT))) {
+        // Check only for Normal Mixing mode
+        if (kEnableExtendedPrecision) {
+            // Specify format (uncomment one below to choose)
+            //config->format = AUDIO_FORMAT_PCM_FLOAT;
+            //config->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
+            //config->format = AUDIO_FORMAT_PCM_32_BIT;
+            //config->format = AUDIO_FORMAT_PCM_8_24_BIT;
+            // ALOGV("openOutput_l() upgrading format to %#08x", config->format);
+        }
+        if (kEnableExtendedChannels) {
+            // Specify channel mask (uncomment one below to choose)
+            //config->channel_mask = audio_channel_out_mask_from_count(4);  // for USB 4ch
+            //config->channel_mask = audio_channel_mask_from_representation_and_bits(
+            //        AUDIO_CHANNEL_REPRESENTATION_INDEX, (1 << 4) - 1);  // another 4ch example
+        }
     }
 
     status_t status = hwDevHal->open_output_stream(hwDevHal,
-                                          id,
-                                          *pDevices,
-                                          (audio_output_flags_t)flags,
-                                          &config,
-                                          &outStream);
+                                                   *output,
+                                                   devices,
+                                                   flags,
+                                                   config,
+                                                   &outStream,
+                                                   address.string());
 
     mHardwareStatus = AUDIO_HW_IDLE;
-    ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %#08x, "
-            "Channels %x, status %d",
+    ALOGV("openOutput_l() openOutputStream returned output %p, sampleRate %d, Format %#x, "
+            "channelMask %#x, status %d",
             outStream,
-            config.sample_rate,
-            config.format,
-            config.channel_mask,
+            config->sample_rate,
+            config->format,
+            config->channel_mask,
             status);
 
     if (status == NO_ERROR && outStream != NULL) {
-        AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags);
+        AudioStreamOut *outputStream = new AudioStreamOut(outHwDev, outStream, flags);
 
         PlaybackThread *thread;
         if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
-            thread = new OffloadThread(this, output, id, *pDevices);
-            ALOGV("openOutput() created offload output: ID %d thread %p", id, thread);
+            thread = new OffloadThread(this, outputStream, *output, devices);
+            ALOGV("openOutput_l() created offload output: ID %d thread %p", *output, thread);
         } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
-                || !isValidPcmSinkFormat(config.format)
-                || (config.channel_mask != AUDIO_CHANNEL_OUT_STEREO)) {
-            thread = new DirectOutputThread(this, output, id, *pDevices);
-            ALOGV("openOutput() created direct output: ID %d thread %p", id, thread);
+                || !isValidPcmSinkFormat(config->format)
+                || !isValidPcmSinkChannelMask(config->channel_mask)) {
+            thread = new DirectOutputThread(this, outputStream, *output, devices);
+            ALOGV("openOutput_l() created direct output: ID %d thread %p", *output, thread);
         } else {
-            thread = new MixerThread(this, output, id, *pDevices);
-            ALOGV("openOutput() created mixer output: ID %d thread %p", id, thread);
+            thread = new MixerThread(this, outputStream, *output, devices);
+            ALOGV("openOutput_l() created mixer output: ID %d thread %p", *output, thread);
         }
-        mPlaybackThreads.add(id, thread);
+        mPlaybackThreads.add(*output, thread);
+        return thread;
+    }
 
-        if (pSamplingRate != NULL) {
-            *pSamplingRate = config.sample_rate;
-        }
-        if (pFormat != NULL) {
-            *pFormat = config.format;
-        }
-        if (pChannelMask != NULL) {
-            *pChannelMask = config.channel_mask;
-        }
-        if (pLatencyMs != NULL) {
-            *pLatencyMs = thread->latency();
-        }
+    return 0;
+}
+
+status_t AudioFlinger::openOutput(audio_module_handle_t module,
+                                  audio_io_handle_t *output,
+                                  audio_config_t *config,
+                                  audio_devices_t *devices,
+                                  const String8& address,
+                                  uint32_t *latencyMs,
+                                  audio_output_flags_t flags)
+{
+    ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
+              module,
+              (devices != NULL) ? *devices : 0,
+              config->sample_rate,
+              config->format,
+              config->channel_mask,
+              flags);
+
+    if (*devices == AUDIO_DEVICE_NONE) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock _l(mLock);
+
+    sp<PlaybackThread> thread = openOutput_l(module, output, config, *devices, address, flags);
+    if (thread != 0) {
+        *latencyMs = thread->latency();
 
         // notify client processes of the new output creation
         thread->audioConfigChanged(AudioSystem::OUTPUT_OPENED);
@@ -1686,19 +1690,19 @@
         // the first primary output opened designates the primary hw device
         if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
             ALOGI("Using module %d has the primary audio interface", module);
-            mPrimaryHardwareDev = outHwDev;
+            mPrimaryHardwareDev = thread->getOutput()->audioHwDev;
 
             AutoMutex lock(mHardwareLock);
             mHardwareStatus = AUDIO_HW_SET_MODE;
-            hwDevHal->set_mode(hwDevHal, mMode);
+            mPrimaryHardwareDev->hwDevice()->set_mode(mPrimaryHardwareDev->hwDevice(), mMode);
             mHardwareStatus = AUDIO_HW_IDLE;
 
-            mPrimaryOutputSampleRate = config.sample_rate;
+            mPrimaryOutputSampleRate = config->sample_rate;
         }
-        return id;
+        return NO_ERROR;
     }
 
-    return AUDIO_IO_HANDLE_NONE;
+    return NO_INIT;
 }
 
 audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1,
@@ -1776,15 +1780,28 @@
     // but the ThreadBase container still exists.
 
     if (thread->type() != ThreadBase::DUPLICATING) {
-        AudioStreamOut *out = thread->clearOutput();
-        ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
-        // from now on thread->mOutput is NULL
-        out->hwDev()->close_output_stream(out->hwDev(), out->stream);
-        delete out;
+        closeOutputFinish(thread);
     }
+
     return NO_ERROR;
 }
 
+void AudioFlinger::closeOutputFinish(sp<PlaybackThread> thread)
+{
+    AudioStreamOut *out = thread->clearOutput();
+    ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
+    // from now on thread->mOutput is NULL
+    out->hwDev()->close_output_stream(out->hwDev(), out->stream);
+    delete out;
+}
+
+void AudioFlinger::closeOutputInternal_l(sp<PlaybackThread> thread)
+{
+    mPlaybackThreads.removeItem(thread->mId);
+    thread->exit();
+    closeOutputFinish(thread);
+}
+
 status_t AudioFlinger::suspendOutput(audio_io_handle_t output)
 {
     Mutex::Autolock _l(mLock);
@@ -1816,60 +1833,75 @@
     return NO_ERROR;
 }
 
-audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module,
-                                          audio_devices_t *pDevices,
-                                          uint32_t *pSamplingRate,
-                                          audio_format_t *pFormat,
-                                          audio_channel_mask_t *pChannelMask)
+status_t AudioFlinger::openInput(audio_module_handle_t module,
+                                          audio_io_handle_t *input,
+                                          audio_config_t *config,
+                                          audio_devices_t *device,
+                                          const String8& address,
+                                          audio_source_t source,
+                                          audio_input_flags_t flags)
 {
-    struct audio_config config;
-    memset(&config, 0, sizeof(config));
-    config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
-    config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
-    config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
-
-    uint32_t reqSamplingRate = config.sample_rate;
-    audio_format_t reqFormat = config.format;
-    audio_channel_mask_t reqChannelMask = config.channel_mask;
-
-    if (pDevices == NULL || *pDevices == AUDIO_DEVICE_NONE) {
-        return 0;
-    }
-
     Mutex::Autolock _l(mLock);
 
-    AudioHwDevice *inHwDev = findSuitableHwDev_l(module, *pDevices);
+    if (*device == AUDIO_DEVICE_NONE) {
+        return BAD_VALUE;
+    }
+
+    sp<RecordThread> thread = openInput_l(module, input, config, *device, address, source, flags);
+
+    if (thread != 0) {
+        // notify client processes of the new input creation
+        thread->audioConfigChanged(AudioSystem::INPUT_OPENED);
+        return NO_ERROR;
+    }
+    return NO_INIT;
+}
+
+sp<AudioFlinger::RecordThread> AudioFlinger::openInput_l(audio_module_handle_t module,
+                                                         audio_io_handle_t *input,
+                                                         audio_config_t *config,
+                                                         audio_devices_t device,
+                                                         const String8& address,
+                                                         audio_source_t source,
+                                                         audio_input_flags_t flags)
+{
+    AudioHwDevice *inHwDev = findSuitableHwDev_l(module, device);
     if (inHwDev == NULL) {
+        *input = AUDIO_IO_HANDLE_NONE;
         return 0;
     }
 
-    audio_hw_device_t *inHwHal = inHwDev->hwDevice();
-    audio_io_handle_t id = nextUniqueId();
+    if (*input == AUDIO_IO_HANDLE_NONE) {
+        *input = nextUniqueId();
+    }
 
+    audio_config_t halconfig = *config;
+    audio_hw_device_t *inHwHal = inHwDev->hwDevice();
     audio_stream_in_t *inStream = NULL;
-    audio_input_flags_t flags = AUDIO_INPUT_FLAG_FAST;  // FIXME until added to openInput()
-    status_t status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
-                                        &inStream, flags);
-    ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %#x, Channels %x, "
-            "status %d",
+    status_t status = inHwHal->open_input_stream(inHwHal, *input, device, &halconfig,
+                                        &inStream, flags, address.string(), source);
+    ALOGV("openInput_l() openInputStream returned input %p, SamplingRate %d"
+           ", Format %#x, Channels %x, flags %#x, status %d",
             inStream,
-            config.sample_rate,
-            config.format,
-            config.channel_mask,
+            halconfig.sample_rate,
+            halconfig.format,
+            halconfig.channel_mask,
+            flags,
             status);
 
     // If the input could not be opened with the requested parameters and we can handle the
     // conversion internally, try to open again with the proposed parameters. The AudioFlinger can
     // resample the input and do mono to stereo or stereo to mono conversions on 16 bit PCM inputs.
     if (status == BAD_VALUE &&
-        reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT &&
-        (config.sample_rate <= 2 * reqSamplingRate) &&
-        (audio_channel_count_from_in_mask(config.channel_mask) <= FCC_2) &&
-        (audio_channel_count_from_in_mask(reqChannelMask) <= FCC_2)) {
+            config->format == halconfig.format && halconfig.format == AUDIO_FORMAT_PCM_16_BIT &&
+        (halconfig.sample_rate <= 2 * config->sample_rate) &&
+        (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_2) &&
+        (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_2)) {
         // FIXME describe the change proposed by HAL (save old values so we can log them here)
-        ALOGV("openInput() reopening with proposed sampling rate and channel mask");
+        ALOGV("openInput_l() reopening with proposed sampling rate and channel mask");
         inStream = NULL;
-        status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream, flags);
+        status = inHwHal->open_input_stream(inHwHal, *input, device, &halconfig,
+                                            &inStream, flags, address.string(), source);
         // FIXME log this new status; HAL should not propose any further changes
     }
 
@@ -1925,37 +1957,26 @@
         }
 #endif
 
-        AudioStreamIn *input = new AudioStreamIn(inHwDev, inStream);
+        AudioStreamIn *inputStream = new AudioStreamIn(inHwDev, inStream);
 
         // Start record thread
         // RecordThread requires both input and output device indication to forward to audio
         // pre processing modules
-        RecordThread *thread = new RecordThread(this,
-                                  input,
-                                  id,
+        sp<RecordThread> thread = new RecordThread(this,
+                                  inputStream,
+                                  *input,
                                   primaryOutputDevice_l(),
-                                  *pDevices
+                                  device
 #ifdef TEE_SINK
                                   , teeSink
 #endif
                                   );
-        mRecordThreads.add(id, thread);
-        ALOGV("openInput() created record thread: ID %d thread %p", id, thread);
-        if (pSamplingRate != NULL) {
-            *pSamplingRate = reqSamplingRate;
-        }
-        if (pFormat != NULL) {
-            *pFormat = config.format;
-        }
-        if (pChannelMask != NULL) {
-            *pChannelMask = reqChannelMask;
-        }
-
-        // notify client processes of the new input creation
-        thread->audioConfigChanged(AudioSystem::INPUT_OPENED);
-        return id;
+        mRecordThreads.add(*input, thread);
+        ALOGV("openInput_l() created record thread: ID %d thread %p", *input, thread.get());
+        return thread;
     }
 
+    *input = AUDIO_IO_HANDLE_NONE;
     return 0;
 }
 
@@ -1980,17 +2001,26 @@
         audioConfigChanged(AudioSystem::INPUT_CLOSED, input, NULL);
         mRecordThreads.removeItem(input);
     }
-    thread->exit();
-    // The thread entity (active unit of execution) is no longer running here,
-    // but the ThreadBase container still exists.
+    // FIXME: calling thread->exit() without mLock held should not be needed anymore now that
+    // notification clients are protected by a separate lock
+    closeInputFinish(thread);
+    return NO_ERROR;
+}
 
+void AudioFlinger::closeInputFinish(sp<RecordThread> thread)
+{
+    thread->exit();
     AudioStreamIn *in = thread->clearInput();
     ALOG_ASSERT(in != NULL, "in shouldn't be NULL");
     // from now on thread->mInput is NULL
     in->hwDev()->close_input_stream(in->hwDev(), in->stream);
     delete in;
+}
 
-    return NO_ERROR;
+void AudioFlinger::closeInputInternal_l(sp<RecordThread> thread)
+{
+    mRecordThreads.removeItem(thread->mId);
+    closeInputFinish(thread);
 }
 
 status_t AudioFlinger::invalidateStream(audio_stream_type_t stream)
@@ -2007,7 +2037,7 @@
 }
 
 
-int AudioFlinger::newAudioSessionId()
+audio_unique_id_t AudioFlinger::newAudioUniqueId()
 {
     return nextUniqueId();
 }
@@ -2461,6 +2491,16 @@
         return INVALID_OPERATION;
     }
 
+    // Check whether the destination thread has a channel count of FCC_2, which is
+    // currently required for (most) effects. Prevent moving the effect chain here rather
+    // than disabling the addEffect_l() call in dstThread below.
+    if (dstThread->mChannelCount != FCC_2) {
+        ALOGW("moveEffectChain_l() effect chain failed because"
+                " destination thread %p channel count(%u) != %u",
+                dstThread, dstThread->mChannelCount, FCC_2);
+        return INVALID_OPERATION;
+    }
+
     // remove chain first. This is useful only if reconfiguring effect chain on same output thread,
     // so that a new chain is created with correct parameters when first effect is added. This is
     // otherwise unnecessary as removeEffect_l() will remove the chain when last effect is
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index be19554..31c5a1a 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -55,6 +55,7 @@
 #include "FastMixer.h"
 #include <media/nbaio/NBAIO.h>
 #include "AudioWatchdog.h"
+#include "AudioMixer.h"
 
 #include <powermanager/IPowerManager.h>
 
@@ -157,14 +158,13 @@
     virtual     size_t      getInputBufferSize(uint32_t sampleRate, audio_format_t format,
                                                audio_channel_mask_t channelMask) const;
 
-    virtual audio_io_handle_t openOutput(audio_module_handle_t module,
-                                         audio_devices_t *pDevices,
-                                         uint32_t *pSamplingRate,
-                                         audio_format_t *pFormat,
-                                         audio_channel_mask_t *pChannelMask,
-                                         uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags,
-                                         const audio_offload_info_t *offloadInfo);
+    virtual status_t openOutput(audio_module_handle_t module,
+                                audio_io_handle_t *output,
+                                audio_config_t *config,
+                                audio_devices_t *devices,
+                                const String8& address,
+                                uint32_t *latencyMs,
+                                audio_output_flags_t flags);
 
     virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
                                                   audio_io_handle_t output2);
@@ -175,11 +175,13 @@
 
     virtual status_t restoreOutput(audio_io_handle_t output);
 
-    virtual audio_io_handle_t openInput(audio_module_handle_t module,
-                                        audio_devices_t *pDevices,
-                                        uint32_t *pSamplingRate,
-                                        audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask);
+    virtual status_t openInput(audio_module_handle_t module,
+                               audio_io_handle_t *input,
+                               audio_config_t *config,
+                               audio_devices_t *device,
+                               const String8& address,
+                               audio_source_t source,
+                               audio_input_flags_t flags);
 
     virtual status_t closeInput(audio_io_handle_t input);
 
@@ -192,7 +194,7 @@
 
     virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const;
 
-    virtual int newAudioSessionId();
+    virtual audio_unique_id_t newAudioUniqueId();
 
     virtual void acquireAudioSessionId(int audioSession, pid_t pid);
 
@@ -326,6 +328,30 @@
                                                 audio_devices_t devices);
     void                    purgeStaleEffects_l();
 
+    // Set kEnableExtendedChannels to true to enable greater than stereo output
+    // for the MixerThread and device sink.  Number of channels allowed is
+    // FCC_2 <= channels <= AudioMixer::MAX_NUM_CHANNELS.
+    static const bool kEnableExtendedChannels = true;
+
+    // Returns true if channel mask is permitted for the PCM sink in the MixerThread
+    static inline bool isValidPcmSinkChannelMask(audio_channel_mask_t channelMask) {
+        switch (audio_channel_mask_get_representation(channelMask)) {
+        case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
+            uint32_t channelCount = FCC_2; // stereo is default
+            if (kEnableExtendedChannels) {
+                channelCount = audio_channel_count_from_out_mask(channelMask);
+                if (channelCount > AudioMixer::MAX_NUM_CHANNELS) {
+                    return false;
+                }
+            }
+            // check that channelMask is the "canonical" one we expect for the channelCount.
+            return channelMask == audio_channel_out_mask_from_count(channelCount);
+            }
+        default:
+            return false;
+        }
+    }
+
     // Set kEnableExtendedPrecision to true to use extended precision in MixerThread
     static const bool kEnableExtendedPrecision = true;
 
@@ -488,6 +514,23 @@
               PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
               MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
               RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
+              sp<RecordThread> openInput_l(audio_module_handle_t module,
+                                           audio_io_handle_t *input,
+                                           audio_config_t *config,
+                                           audio_devices_t device,
+                                           const String8& address,
+                                           audio_source_t source,
+                                           audio_input_flags_t flags);
+              sp<PlaybackThread> openOutput_l(audio_module_handle_t module,
+                                              audio_io_handle_t *output,
+                                              audio_config_t *config,
+                                              audio_devices_t devices,
+                                              const String8& address,
+                                              audio_output_flags_t flags);
+
+              void closeOutputFinish(sp<PlaybackThread> thread);
+              void closeInputFinish(sp<RecordThread> thread);
+
               // no range check, AudioFlinger::mLock held
               bool streamMute_l(audio_stream_type_t stream) const
                                 { return mStreamTypes[stream].mute; }
@@ -529,10 +572,11 @@
             AHWD_CAN_SET_MASTER_MUTE    = 0x2,
         };
 
-        AudioHwDevice(const char *moduleName,
+        AudioHwDevice(audio_module_handle_t handle,
+                      const char *moduleName,
                       audio_hw_device_t *hwDevice,
                       Flags flags)
-            : mModuleName(strdup(moduleName))
+            : mHandle(handle), mModuleName(strdup(moduleName))
             , mHwDevice(hwDevice)
             , mFlags(flags) { }
         /*virtual*/ ~AudioHwDevice() { free((void *)mModuleName); }
@@ -545,11 +589,13 @@
             return (0 != (mFlags & AHWD_CAN_SET_MASTER_MUTE));
         }
 
+        audio_module_handle_t handle() const { return mHandle; }
         const char *moduleName() const { return mModuleName; }
         audio_hw_device_t *hwDevice() const { return mHwDevice; }
         uint32_t version() const { return mHwDevice->common.version; }
 
     private:
+        const audio_module_handle_t mHandle;
         const char * const mModuleName;
         audio_hw_device_t * const mHwDevice;
         const Flags mFlags;
@@ -668,7 +714,9 @@
 
     // for use from destructor
     status_t    closeOutput_nonvirtual(audio_io_handle_t output);
+    void        closeOutputInternal_l(sp<PlaybackThread> thread);
     status_t    closeInput_nonvirtual(audio_io_handle_t input);
+    void        closeInputInternal_l(sp<RecordThread> thread);
 
 #ifdef TEE_SINK
     // all record threads serially share a common tee sink, which is re-created on format change
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index e57cb8a..6edca1b 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -40,16 +40,19 @@
 #include <common_time/cc_helper.h>
 
 #include <media/EffectsFactoryApi.h>
+#include <audio_effects/effect_downmix.h>
 
 #include "AudioMixerOps.h"
 #include "AudioMixer.h"
 
-// Use the FCC_2 macro for code assuming Fixed Channel Count of 2 and
-// whose stereo assumption may need to be revisited later.
+// The FCC_2 macro refers to the Fixed Channel Count of 2 for the legacy integer mixer.
 #ifndef FCC_2
 #define FCC_2 2
 #endif
 
+// Look for the MONO_HACK marker to find hacks involving legacy mono-channel
+// to stereo-channel conversion.
+
 /* VERY_VERY_VERBOSE_LOGGING will show exactly which process hook and track hook is
  * being used. This is a considerable amount of log spam, so don't enable unless you
  * are verifying the hook based code.
@@ -62,6 +65,10 @@
 #define ALOGVV(a...) do { } while (0)
 #endif
 
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
 // Set kUseNewMixer to true to use the new mixer engine. Otherwise the
 // original code will be used.  This is false for now.
 static const bool kUseNewMixer = false;
@@ -71,52 +78,12 @@
 // because of downmix/upmix support.
 static const bool kUseFloat = true;
 
+// Set to default copy buffer size in frames for input processing.
+static const size_t kCopyBufferFrameCount = 256;
+
 namespace android {
 
 // ----------------------------------------------------------------------------
-AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider() : AudioBufferProvider(),
-        mTrackBufferProvider(NULL), mDownmixHandle(NULL)
-{
-}
-
-AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider()
-{
-    ALOGV("AudioMixer deleting DownmixerBufferProvider (%p)", this);
-    EffectRelease(mDownmixHandle);
-}
-
-status_t AudioMixer::DownmixerBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
-        int64_t pts) {
-    //ALOGV("DownmixerBufferProvider::getNextBuffer()");
-    if (mTrackBufferProvider != NULL) {
-        status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
-        if (res == OK) {
-            mDownmixConfig.inputCfg.buffer.frameCount = pBuffer->frameCount;
-            mDownmixConfig.inputCfg.buffer.raw = pBuffer->raw;
-            mDownmixConfig.outputCfg.buffer.frameCount = pBuffer->frameCount;
-            mDownmixConfig.outputCfg.buffer.raw = mDownmixConfig.inputCfg.buffer.raw;
-            // in-place so overwrite the buffer contents, has been set in prepareTrackForDownmix()
-            //mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
-
-            res = (*mDownmixHandle)->process(mDownmixHandle,
-                    &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
-            //ALOGV("getNextBuffer is downmixing");
-        }
-        return res;
-    } else {
-        ALOGE("DownmixerBufferProvider::getNextBuffer() error: NULL track buffer provider");
-        return NO_INIT;
-    }
-}
-
-void AudioMixer::DownmixerBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
-    //ALOGV("DownmixerBufferProvider::releaseBuffer()");
-    if (mTrackBufferProvider != NULL) {
-        mTrackBufferProvider->releaseBuffer(pBuffer);
-    } else {
-        ALOGE("DownmixerBufferProvider::releaseBuffer() error: NULL track buffer provider");
-    }
-}
 
 template <typename T>
 T min(const T& a, const T& b)
@@ -124,102 +91,289 @@
     return a < b ? a : b;
 }
 
-AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels,
-        audio_format_t inputFormat, audio_format_t outputFormat) :
-        mTrackBufferProvider(NULL),
-        mChannels(channels),
-        mInputFormat(inputFormat),
-        mOutputFormat(outputFormat),
-        mInputFrameSize(channels * audio_bytes_per_sample(inputFormat)),
-        mOutputFrameSize(channels * audio_bytes_per_sample(outputFormat)),
-        mOutputData(NULL),
-        mOutputCount(0),
+AudioMixer::CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
+        size_t outputFrameSize, size_t bufferFrameCount) :
+        mInputFrameSize(inputFrameSize),
+        mOutputFrameSize(outputFrameSize),
+        mLocalBufferFrameCount(bufferFrameCount),
+        mLocalBufferData(NULL),
         mConsumed(0)
 {
-    ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat);
-    if (requiresInternalBuffers()) {
-        mOutputCount = 256;
-        (void)posix_memalign(&mOutputData, 32, mOutputCount * mOutputFrameSize);
+    ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
+            inputFrameSize, outputFrameSize, bufferFrameCount);
+    LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
+            "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)",
+            inputFrameSize, outputFrameSize);
+    if (mLocalBufferFrameCount) {
+        (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
     }
     mBuffer.frameCount = 0;
 }
 
-AudioMixer::ReformatBufferProvider::~ReformatBufferProvider()
+AudioMixer::CopyBufferProvider::~CopyBufferProvider()
 {
-    ALOGV("~ReformatBufferProvider(%p)", this);
+    ALOGV("~CopyBufferProvider(%p)", this);
     if (mBuffer.frameCount != 0) {
         mTrackBufferProvider->releaseBuffer(&mBuffer);
     }
-    free(mOutputData);
+    free(mLocalBufferData);
 }
 
-status_t AudioMixer::ReformatBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
-        int64_t pts) {
-    //ALOGV("ReformatBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
+status_t AudioMixer::CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
+        int64_t pts)
+{
+    //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
     //        this, pBuffer, pBuffer->frameCount, pts);
-    if (!requiresInternalBuffers()) {
+    if (mLocalBufferFrameCount == 0) {
         status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
         if (res == OK) {
-            memcpy_by_audio_format(pBuffer->raw, mOutputFormat, pBuffer->raw, mInputFormat,
-                    pBuffer->frameCount * mChannels);
+            copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
         }
         return res;
     }
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = pBuffer->frameCount;
         status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
-        // TODO: Track down a bug in the upstream provider
-        // LOG_ALWAYS_FATAL_IF(res == OK && mBuffer.frameCount == 0,
-        //        "ReformatBufferProvider::getNextBuffer():"
-        //        " Invalid zero framecount returned from getNextBuffer()");
-        if (res != OK || mBuffer.frameCount == 0) {
+        // At one time an upstream buffer provider returned
+        // res == OK with mBuffer.frameCount == 0; this no longer seems to happen (7/18/2014).
+        //
+        // By API spec, if res != OK, then mBuffer.frameCount == 0.
+        // but there may be improper implementations.
+        ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
+        if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
             pBuffer->raw = NULL;
             pBuffer->frameCount = 0;
             return res;
         }
+        mConsumed = 0;
     }
     ALOG_ASSERT(mConsumed < mBuffer.frameCount);
-    size_t count = min(mOutputCount, mBuffer.frameCount - mConsumed);
+    size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
     count = min(count, pBuffer->frameCount);
-    pBuffer->raw = mOutputData;
+    pBuffer->raw = mLocalBufferData;
     pBuffer->frameCount = count;
-    //ALOGV("reformatting %d frames from %#x to %#x, %d chan",
-    //        pBuffer->frameCount, mInputFormat, mOutputFormat, mChannels);
-    memcpy_by_audio_format(pBuffer->raw, mOutputFormat,
-            (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize, mInputFormat,
-            pBuffer->frameCount * mChannels);
+    copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
+            pBuffer->frameCount);
     return OK;
 }
 
-void AudioMixer::ReformatBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
-    //ALOGV("ReformatBufferProvider(%p)::releaseBuffer(%p(%zu))",
+void AudioMixer::CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+    //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))",
     //        this, pBuffer, pBuffer->frameCount);
-    if (!requiresInternalBuffers()) {
+    if (mLocalBufferFrameCount == 0) {
         mTrackBufferProvider->releaseBuffer(pBuffer);
         return;
     }
     // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
     mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content
     if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) {
-        mConsumed = 0;
         mTrackBufferProvider->releaseBuffer(&mBuffer);
-        // ALOG_ASSERT(mBuffer.frameCount == 0);
+        ALOG_ASSERT(mBuffer.frameCount == 0);
     }
     pBuffer->raw = NULL;
     pBuffer->frameCount = 0;
 }
 
-void AudioMixer::ReformatBufferProvider::reset() {
+void AudioMixer::CopyBufferProvider::reset()
+{
     if (mBuffer.frameCount != 0) {
         mTrackBufferProvider->releaseBuffer(&mBuffer);
     }
     mConsumed = 0;
 }
 
-// ----------------------------------------------------------------------------
-bool AudioMixer::sIsMultichannelCapable = false;
+AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider(
+        audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
+        CopyBufferProvider(
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
+            bufferFrameCount)  // set bufferFrameCount to 0 to do in-place
+{
+    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
+            this, inputChannelMask, outputChannelMask, format,
+            sampleRate, sessionId);
+    if (!sIsMultichannelCapable
+            || EffectCreate(&sDwnmFxDesc.uuid,
+                    sessionId,
+                    SESSION_ID_INVALID_AND_IGNORED,
+                    &mDownmixHandle) != 0) {
+         ALOGE("DownmixerBufferProvider() error creating downmixer effect");
+         mDownmixHandle = NULL;
+         return;
+     }
+     // channel input configuration will be overridden per-track
+     mDownmixConfig.inputCfg.channels = inputChannelMask;   // FIXME: Should be bits
+     mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
+     mDownmixConfig.inputCfg.format = format;
+     mDownmixConfig.outputCfg.format = format;
+     mDownmixConfig.inputCfg.samplingRate = sampleRate;
+     mDownmixConfig.outputCfg.samplingRate = sampleRate;
+     mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+     mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
+     // input and output buffer provider, and frame count will not be used as the downmix effect
+     // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
+     mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
+             EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
+     mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
 
-effect_descriptor_t AudioMixer::sDwnmFxDesc;
+     int cmdStatus;
+     uint32_t replySize = sizeof(int);
+
+     // Configure downmixer
+     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
+             &mDownmixConfig /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Enable downmixer
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Set downmix type
+     // parameter size rounded for padding on 32bit boundary
+     const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
+     const int downmixParamSize =
+             sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
+     effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
+     param->psize = sizeof(downmix_params_t);
+     const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
+     memcpy(param->data, &downmixParam, param->psize);
+     const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
+     param->vsize = sizeof(downmix_type_t);
+     memcpy(param->data + psizePadded, &downmixType, param->vsize);
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
+             param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
+     free(param);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+     ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
+}
+
+AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider()
+{
+    ALOGV("~DownmixerBufferProvider (%p)", this);
+    EffectRelease(mDownmixHandle);
+    mDownmixHandle = NULL;
+}
+
+void AudioMixer::DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    mDownmixConfig.inputCfg.buffer.frameCount = frames;
+    mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src);
+    mDownmixConfig.outputCfg.buffer.frameCount = frames;
+    mDownmixConfig.outputCfg.buffer.raw = dst;
+    // may be in-place if src == dst.
+    status_t res = (*mDownmixHandle)->process(mDownmixHandle,
+            &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
+    ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
+}
+
+/* call once in a pthread_once handler. */
+/*static*/ status_t AudioMixer::DownmixerBufferProvider::init()
+{
+    // find multichannel downmix effect if we have to play multichannel content
+    uint32_t numEffects = 0;
+    int ret = EffectQueryNumberEffects(&numEffects);
+    if (ret != 0) {
+        ALOGE("AudioMixer() error %d querying number of effects", ret);
+        return NO_INIT;
+    }
+    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
+
+    for (uint32_t i = 0 ; i < numEffects ; i++) {
+        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
+            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
+            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+                ALOGI("found effect \"%s\" from %s",
+                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
+                sIsMultichannelCapable = true;
+                break;
+            }
+        }
+    }
+    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
+    return NO_INIT;
+}
+
+/*static*/ bool AudioMixer::DownmixerBufferProvider::sIsMultichannelCapable = false;
+/*static*/ effect_descriptor_t AudioMixer::DownmixerBufferProvider::sDwnmFxDesc;
+
+AudioMixer::RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(inputChannelMask),
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(outputChannelMask),
+                bufferFrameCount),
+        mFormat(format),
+        mSampleSize(audio_bytes_per_sample(format)),
+        mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
+        mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
+{
+    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
+            this, format, inputChannelMask, outputChannelMask,
+            mInputChannels, mOutputChannels);
+    // TODO: consider channel representation in index array formulation
+    // We ignore channel representation, and just use the bits.
+    memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry),
+            audio_channel_mask_get_bits(outputChannelMask),
+            audio_channel_mask_get_bits(inputChannelMask));
+}
+
+void AudioMixer::RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_index_array(dst, mOutputChannels,
+            src, mInputChannels, mIdxAry, mSampleSize, frames);
+}
+
+AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels,
+        audio_format_t inputFormat, audio_format_t outputFormat,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+            channels * audio_bytes_per_sample(inputFormat),
+            channels * audio_bytes_per_sample(outputFormat),
+            bufferFrameCount),
+        mChannels(channels),
+        mInputFormat(inputFormat),
+        mOutputFormat(outputFormat)
+{
+    ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat);
+}
+
+void AudioMixer::ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannels);
+}
+
+// ----------------------------------------------------------------------------
 
 // Ensure mConfiguredNames bitmask is initialized properly on all architectures.
 // The value of 1 << x is undefined in C when x >= 32.
@@ -228,18 +382,12 @@
     :   mTrackNames(0), mConfiguredNames((maxNumTracks >= 32 ? 0 : 1 << maxNumTracks) - 1),
         mSampleRate(sampleRate)
 {
-    // AudioMixer is not yet capable of multi-channel beyond stereo
-    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(2 == MAX_NUM_CHANNELS);
-
     ALOG_ASSERT(maxNumTracks <= MAX_NUM_TRACKS, "maxNumTracks %u > MAX_NUM_TRACKS %u",
             maxNumTracks, MAX_NUM_TRACKS);
 
     // AudioMixer is not yet capable of more than 32 active track inputs
     ALOG_ASSERT(32 >= MAX_NUM_TRACKS, "bad MAX_NUM_TRACKS %d", MAX_NUM_TRACKS);
 
-    // AudioMixer is not yet capable of multi-channel output beyond stereo
-    ALOG_ASSERT(2 == MAX_NUM_CHANNELS, "bad MAX_NUM_CHANNELS %d", MAX_NUM_CHANNELS);
-
     pthread_once(&sOnceControl, &sInitRoutine);
 
     mState.enabledTracks= 0;
@@ -258,6 +406,7 @@
     for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
         t->resampler = NULL;
         t->downmixerBufferProvider = NULL;
+        t->mReformatBufferProvider = NULL;
         t++;
     }
 
@@ -269,6 +418,7 @@
     for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
         delete t->resampler;
         delete t->downmixerBufferProvider;
+        delete t->mReformatBufferProvider;
         t++;
     }
     delete [] mState.outputTemp;
@@ -323,7 +473,7 @@
         // t->frameCount
         t->channelCount = audio_channel_count_from_out_mask(channelMask);
         t->enabled = false;
-        ALOGV_IF(channelMask != AUDIO_CHANNEL_OUT_STEREO,
+        ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
                 "Non-stereo channel mask: %d\n", channelMask);
         t->channelMask = channelMask;
         t->sessionId = sessionId;
@@ -346,8 +496,11 @@
         t->mFormat = format;
         t->mMixerInFormat = kUseFloat && kUseNewMixer
                 ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+        t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
+                AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
+        t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
         // Check the downmixing (or upmixing) requirements.
-        status_t status = initTrackDownmix(t, n, channelMask);
+        status_t status = initTrackDownmix(t, n);
         if (status != OK) {
             ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
             return -1;
@@ -372,21 +525,69 @@
     }
  }
 
-status_t AudioMixer::initTrackDownmix(track_t* pTrack, int trackNum, audio_channel_mask_t mask)
-{
-    uint32_t channelCount = audio_channel_count_from_out_mask(mask);
-    ALOG_ASSERT((channelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX) && channelCount);
-    status_t status = OK;
-    if (channelCount > MAX_NUM_CHANNELS) {
-        pTrack->channelMask = mask;
-        pTrack->channelCount = channelCount;
-        ALOGV("initTrackDownmix(track=%d, mask=0x%x) calls prepareTrackForDownmix()",
-                trackNum, mask);
-        status = prepareTrackForDownmix(pTrack, trackNum);
-    } else {
-        unprepareTrackForDownmix(pTrack, trackNum);
+// Called when channel masks have changed for a track name
+// TODO: Fix DownmixerBufferProvider not to (possibly) change mixer input format,
+// which will simplify this logic.
+bool AudioMixer::setChannelMasks(int name,
+        audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) {
+    track_t &track = mState.tracks[name];
+
+    if (trackChannelMask == track.channelMask
+            && mixerChannelMask == track.mMixerChannelMask) {
+        return false;  // no need to change
     }
-    return status;
+    // always recompute for both channel masks even if only one has changed.
+    const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
+    const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
+    const bool mixerChannelCountChanged = track.mMixerChannelCount != mixerChannelCount;
+
+    ALOG_ASSERT((trackChannelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX)
+            && trackChannelCount
+            && mixerChannelCount);
+    track.channelMask = trackChannelMask;
+    track.channelCount = trackChannelCount;
+    track.mMixerChannelMask = mixerChannelMask;
+    track.mMixerChannelCount = mixerChannelCount;
+
+    // channel masks have changed, does this track need a downmixer?
+    // update to try using our desired format (if we aren't already using it)
+    const audio_format_t prevMixerInFormat = track.mMixerInFormat;
+    track.mMixerInFormat = kUseFloat && kUseNewMixer
+            ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+    const status_t status = initTrackDownmix(&mState.tracks[name], name);
+    ALOGE_IF(status != OK,
+            "initTrackDownmix error %d, track channel mask %#x, mixer channel mask %#x",
+            status, track.channelMask, track.mMixerChannelMask);
+
+    const bool mixerInFormatChanged = prevMixerInFormat != track.mMixerInFormat;
+    if (mixerInFormatChanged) {
+        prepareTrackForReformat(&track, name); // because of downmixer, track format may change!
+    }
+
+    if (track.resampler && (mixerInFormatChanged || mixerChannelCountChanged)) {
+        // resampler input format or channels may have changed.
+        const uint32_t resetToSampleRate = track.sampleRate;
+        delete track.resampler;
+        track.resampler = NULL;
+        track.sampleRate = mSampleRate; // without resampler, track rate is device sample rate.
+        // recreate the resampler with updated format, channels, saved sampleRate.
+        track.setResampler(resetToSampleRate /*trackSampleRate*/, mSampleRate /*devSampleRate*/);
+    }
+    return true;
+}
+
+status_t AudioMixer::initTrackDownmix(track_t* pTrack, int trackName)
+{
+    // Only remix (upmix or downmix) if the track and mixer/device channel masks
+    // are not the same and not handled internally, as mono -> stereo currently is.
+    if (pTrack->channelMask != pTrack->mMixerChannelMask
+            && !(pTrack->channelMask == AUDIO_CHANNEL_OUT_MONO
+                    && pTrack->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) {
+        return prepareTrackForDownmix(pTrack, trackName);
+    }
+    // no remix necessary
+    unprepareTrackForDownmix(pTrack, trackName);
+    return NO_ERROR;
 }
 
 void AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName __unused) {
@@ -409,98 +610,28 @@
 
     // discard the previous downmixer if there was one
     unprepareTrackForDownmix(pTrack, trackName);
+    if (DownmixerBufferProvider::isMultichannelCapable()) {
+        DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(pTrack->channelMask,
+                pTrack->mMixerChannelMask,
+                AUDIO_FORMAT_PCM_16_BIT /* TODO: use pTrack->mMixerInFormat, now only PCM 16 */,
+                pTrack->sampleRate, pTrack->sessionId, kCopyBufferFrameCount);
 
-    DownmixerBufferProvider* pDbp = new DownmixerBufferProvider();
-    int32_t status;
-
-    if (!sIsMultichannelCapable) {
-        ALOGE("prepareTrackForDownmix(%d) fails: mixer doesn't support multichannel content",
-                trackName);
-        goto noDownmixForActiveTrack;
+        if (pDbp->isValid()) { // if constructor completed properly
+            pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
+            pTrack->downmixerBufferProvider = pDbp;
+            reconfigureBufferProviders(pTrack);
+            return NO_ERROR;
+        }
+        delete pDbp;
     }
 
-    if (EffectCreate(&sDwnmFxDesc.uuid,
-            pTrack->sessionId /*sessionId*/, -2 /*ioId not relevant here, using random value*/,
-            &pDbp->mDownmixHandle/*pHandle*/) != 0) {
-        ALOGE("prepareTrackForDownmix(%d) fails: error creating downmixer effect", trackName);
-        goto noDownmixForActiveTrack;
-    }
-
-    // channel input configuration will be overridden per-track
-    pDbp->mDownmixConfig.inputCfg.channels = pTrack->channelMask;
-    pDbp->mDownmixConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
-    pDbp->mDownmixConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
-    pDbp->mDownmixConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
-    pDbp->mDownmixConfig.inputCfg.samplingRate = pTrack->sampleRate;
-    pDbp->mDownmixConfig.outputCfg.samplingRate = pTrack->sampleRate;
-    pDbp->mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
-    pDbp->mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
-    // input and output buffer provider, and frame count will not be used as the downmix effect
-    // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
-    pDbp->mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
-            EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
-    pDbp->mDownmixConfig.outputCfg.mask = pDbp->mDownmixConfig.inputCfg.mask;
-
-    {// scope for local variables that are not used in goto label "noDownmixForActiveTrack"
-        int cmdStatus;
-        uint32_t replySize = sizeof(int);
-
-        // Configure and enable downmixer
-        status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle,
-                EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
-                &pDbp->mDownmixConfig /*pCmdData*/,
-                &replySize /*replySize*/, &cmdStatus /*pReplyData*/);
-        if ((status != 0) || (cmdStatus != 0)) {
-            ALOGE("error %d while configuring downmixer for track %d", status, trackName);
-            goto noDownmixForActiveTrack;
-        }
-        replySize = sizeof(int);
-        status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle,
-                EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
-                &replySize /*replySize*/, &cmdStatus /*pReplyData*/);
-        if ((status != 0) || (cmdStatus != 0)) {
-            ALOGE("error %d while enabling downmixer for track %d", status, trackName);
-            goto noDownmixForActiveTrack;
-        }
-
-        // Set downmix type
-        // parameter size rounded for padding on 32bit boundary
-        const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
-        const int downmixParamSize =
-                sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
-        effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
-        param->psize = sizeof(downmix_params_t);
-        const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
-        memcpy(param->data, &downmixParam, param->psize);
-        const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
-        param->vsize = sizeof(downmix_type_t);
-        memcpy(param->data + psizePadded, &downmixType, param->vsize);
-
-        status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle,
-                EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize/* cmdSize */,
-                param /*pCmndData*/, &replySize /*replySize*/, &cmdStatus /*pReplyData*/);
-
-        free(param);
-
-        if ((status != 0) || (cmdStatus != 0)) {
-            ALOGE("error %d while setting downmix type for track %d", status, trackName);
-            goto noDownmixForActiveTrack;
-        } else {
-            ALOGV("downmix type set to %d for track %d", (int) downmixType, trackName);
-        }
-    }// end of scope for local variables that are not used in goto label "noDownmixForActiveTrack"
-
-    // initialization successful:
-    pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // 16 bit input is required for downmix
-    pTrack->downmixerBufferProvider = pDbp;
+    // Effect downmixer does not accept the channel conversion.  Let's use our remixer.
+    RemixBufferProvider* pRbp = new RemixBufferProvider(pTrack->channelMask,
+            pTrack->mMixerChannelMask, pTrack->mMixerInFormat, kCopyBufferFrameCount);
+    // Remix always finds a conversion whereas Downmixer effect above may fail.
+    pTrack->downmixerBufferProvider = pRbp;
     reconfigureBufferProviders(pTrack);
     return NO_ERROR;
-
-noDownmixForActiveTrack:
-    delete pDbp;
-    pTrack->downmixerBufferProvider = NULL;
-    reconfigureBufferProviders(pTrack);
-    return NO_INIT;
 }
 
 void AudioMixer::unprepareTrackForReformat(track_t* pTrack, int trackName __unused) {
@@ -521,7 +652,8 @@
     if (pTrack->mFormat != pTrack->mMixerInFormat) {
         pTrack->mReformatBufferProvider = new ReformatBufferProvider(
                 audio_channel_count_from_out_mask(pTrack->channelMask),
-                pTrack->mFormat, pTrack->mMixerInFormat);
+                pTrack->mFormat, pTrack->mMixerInFormat,
+                kCopyBufferFrameCount);
         reconfigureBufferProviders(pTrack);
     }
     return NO_ERROR;
@@ -531,11 +663,11 @@
 {
     pTrack->bufferProvider = pTrack->mInputBufferProvider;
     if (pTrack->mReformatBufferProvider) {
-        pTrack->mReformatBufferProvider->mTrackBufferProvider = pTrack->bufferProvider;
+        pTrack->mReformatBufferProvider->setBufferProvider(pTrack->bufferProvider);
         pTrack->bufferProvider = pTrack->mReformatBufferProvider;
     }
     if (pTrack->downmixerBufferProvider) {
-        pTrack->downmixerBufferProvider->mTrackBufferProvider = pTrack->bufferProvider;
+        pTrack->downmixerBufferProvider->setBufferProvider(pTrack->bufferProvider);
         pTrack->bufferProvider = pTrack->downmixerBufferProvider;
     }
 }
@@ -669,23 +801,10 @@
     case TRACK:
         switch (param) {
         case CHANNEL_MASK: {
-            audio_channel_mask_t mask =
-                static_cast<audio_channel_mask_t>(reinterpret_cast<uintptr_t>(value));
-            if (track.channelMask != mask) {
-                uint32_t channelCount = audio_channel_count_from_out_mask(mask);
-                ALOG_ASSERT((channelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX) && channelCount);
-                track.channelMask = mask;
-                track.channelCount = channelCount;
-                // the mask has changed, does this track need a downmixer?
-                // update to try using our desired format (if we aren't already using it)
-                track.mMixerInFormat = kUseFloat && kUseNewMixer
-                        ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
-                status_t status = initTrackDownmix(&mState.tracks[name], name, mask);
-                ALOGE_IF(status != OK,
-                        "Invalid channel mask %#x, initTrackDownmix returned %d",
-                        mask, status);
-                ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", mask);
-                prepareTrackForReformat(&track, name); // format may have changed
+            const audio_channel_mask_t trackChannelMask =
+                static_cast<audio_channel_mask_t>(valueInt);
+            if (setChannelMasks(name, trackChannelMask, track.mMixerChannelMask)) {
+                ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
                 invalidateState(1 << name);
             }
             } break;
@@ -724,6 +843,14 @@
                 ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
             }
             } break;
+        case MIXER_CHANNEL_MASK: {
+            const audio_channel_mask_t mixerChannelMask =
+                    static_cast<audio_channel_mask_t>(valueInt);
+            if (setChannelMasks(name, track.channelMask, mixerChannelMask)) {
+                ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
+                invalidateState(1 << name);
+            }
+            } break;
         default:
             LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
         }
@@ -757,20 +884,6 @@
     case RAMP_VOLUME:
     case VOLUME:
         switch (param) {
-        case VOLUME0:
-        case VOLUME1:
-            if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
-                    target == RAMP_VOLUME ? mState.frameCount : 0,
-                    &track.volume[param - VOLUME0], &track.prevVolume[param - VOLUME0],
-                    &track.volumeInc[param - VOLUME0],
-                    &track.mVolume[param - VOLUME0], &track.mPrevVolume[param - VOLUME0],
-                    &track.mVolumeInc[param - VOLUME0])) {
-                ALOGV("setParameter(%s, VOLUME%d: %04x)",
-                        target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
-                                track.volume[param - VOLUME0]);
-                invalidateState(1 << name);
-            }
-            break;
         case AUXLEVEL:
             if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
                     target == RAMP_VOLUME ? mState.frameCount : 0,
@@ -782,7 +895,21 @@
             }
             break;
         default:
-            LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
+            if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
+                if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
+                        target == RAMP_VOLUME ? mState.frameCount : 0,
+                        &track.volume[param - VOLUME0], &track.prevVolume[param - VOLUME0],
+                        &track.volumeInc[param - VOLUME0],
+                        &track.mVolume[param - VOLUME0], &track.mPrevVolume[param - VOLUME0],
+                        &track.mVolumeInc[param - VOLUME0])) {
+                    ALOGV("setParameter(%s, VOLUME%d: %04x)",
+                            target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
+                                    track.volume[param - VOLUME0]);
+                    invalidateState(1 << name);
+                }
+            } else {
+                LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
+            }
         }
         break;
 
@@ -791,30 +918,36 @@
     }
 }
 
-bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate)
+bool AudioMixer::track_t::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
 {
-    if (value != devSampleRate || resampler != NULL) {
-        if (sampleRate != value) {
-            sampleRate = value;
+    if (trackSampleRate != devSampleRate || resampler != NULL) {
+        if (sampleRate != trackSampleRate) {
+            sampleRate = trackSampleRate;
             if (resampler == NULL) {
-                ALOGV("creating resampler from track %d Hz to device %d Hz", value, devSampleRate);
+                ALOGV("Creating resampler from track %d Hz to device %d Hz",
+                        trackSampleRate, devSampleRate);
                 AudioResampler::src_quality quality;
                 // force lowest quality level resampler if use case isn't music or video
                 // FIXME this is flawed for dynamic sample rates, as we choose the resampler
                 // quality level based on the initial ratio, but that could change later.
                 // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
-                if (!((value == 44100 && devSampleRate == 48000) ||
-                      (value == 48000 && devSampleRate == 44100))) {
+                if (!((trackSampleRate == 44100 && devSampleRate == 48000) ||
+                      (trackSampleRate == 48000 && devSampleRate == 44100))) {
                     quality = AudioResampler::DYN_LOW_QUALITY;
                 } else {
                     quality = AudioResampler::DEFAULT_QUALITY;
                 }
 
-                ALOGVV("Creating resampler with %d bits\n", bits);
+                // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
+                // but if none exists, it is the channel count (1 for mono).
+                const int resamplerChannelCount = downmixerBufferProvider != NULL
+                        ? mMixerChannelCount : channelCount;
+                ALOGVV("Creating resampler:"
+                        " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
+                        mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
                 resampler = AudioResampler::create(
                         mMixerInFormat,
-                        // the resampler sees the number of channels after the downmixer, if any
-                        (int) (downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount),
+                        resamplerChannelCount,
                         devSampleRate, quality);
                 resampler->setLocalTimeFreq(sLocalTimeFreq);
             }
@@ -840,20 +973,19 @@
 inline void AudioMixer::track_t::adjustVolumeRamp(bool aux, bool useFloat)
 {
     if (useFloat) {
-        for (uint32_t i=0 ; i<MAX_NUM_CHANNELS ; i++) {
+        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
             if (mVolumeInc[i] != 0 && fabs(mVolume[i] - mPrevVolume[i]) <= fabs(mVolumeInc[i])) {
                 volumeInc[i] = 0;
                 prevVolume[i] = volume[i] << 16;
                 mVolumeInc[i] = 0.;
                 mPrevVolume[i] = mVolume[i];
-
             } else {
                 //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]);
                 prevVolume[i] = u4_28_from_float(mPrevVolume[i]);
             }
         }
     } else {
-        for (uint32_t i=0 ; i<MAX_NUM_CHANNELS ; i++) {
+        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
             if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
                     ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
                 volumeInc[i] = 0;
@@ -972,18 +1104,21 @@
             if (n & NEEDS_RESAMPLE) {
                 all16BitsStereoNoResample = false;
                 resampling = true;
-                t.hook = getTrackHook(TRACKTYPE_RESAMPLE, FCC_2,
+                t.hook = getTrackHook(TRACKTYPE_RESAMPLE, t.mMixerChannelCount,
                         t.mMixerInFormat, t.mMixerFormat);
                 ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
                         "Track %d needs downmix + resample", i);
             } else {
                 if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
-                    t.hook = getTrackHook(TRACKTYPE_NORESAMPLEMONO, FCC_2,
+                    t.hook = getTrackHook(
+                            t.mMixerChannelCount == 2 // TODO: MONO_HACK.
+                                ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
+                            t.mMixerChannelCount,
                             t.mMixerInFormat, t.mMixerFormat);
                     all16BitsStereoNoResample = false;
                 }
                 if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){
-                    t.hook = getTrackHook(TRACKTYPE_NORESAMPLE, FCC_2,
+                    t.hook = getTrackHook(TRACKTYPE_NORESAMPLE, t.mMixerChannelCount,
                             t.mMixerInFormat, t.mMixerFormat);
                     ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
                             "Track %d needs downmix", i);
@@ -1017,8 +1152,8 @@
                 if (countActiveTracks == 1) {
                     const int i = 31 - __builtin_clz(state->enabledTracks);
                     track_t& t = state->tracks[i];
-                    state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK, FCC_2,
-                            t.mMixerInFormat, t.mMixerFormat);
+                    state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+                            t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
                 }
             }
         }
@@ -1051,7 +1186,10 @@
             state->hook = process__nop;
         } else if (all16BitsStereoNoResample) {
             if (countActiveTracks == 1) {
-                state->hook = process__OneTrack16BitsStereoNoResampling;
+                const int i = 31 - __builtin_clz(state->enabledTracks);
+                track_t& t = state->tracks[i];
+                state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+                        t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
             }
         }
     }
@@ -1068,9 +1206,8 @@
     if (aux != NULL) {
         // always resample with unity gain when sending to auxiliary buffer to be able
         // to apply send level after resampling
-        // TODO: modify each resampler to support aux channel?
         t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
-        memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
+        memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(int32_t));
         t->resampler->resample(temp, outFrameCount, t->bufferProvider);
         if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
             volumeRampStereo(t, out, outFrameCount, temp, aux);
@@ -1355,7 +1492,6 @@
 {
     ALOGVV("process__nop\n");
     uint32_t e0 = state->enabledTracks;
-    size_t sampleCount = state->frameCount * MAX_NUM_CHANNELS;
     while (e0) {
         // process by group of tracks with same output buffer to
         // avoid multiple memset() on same buffer
@@ -1374,7 +1510,7 @@
             }
             e0 &= ~(e1);
 
-            memset(t1.mainBuffer, 0, sampleCount
+            memset(t1.mainBuffer, 0, state->frameCount * t1.mMixerChannelCount
                     * audio_bytes_per_sample(t1.mMixerFormat));
         }
 
@@ -1459,8 +1595,8 @@
                     }
                     size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
                     if (inFrames > 0) {
-                        t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames,
-                                state->resampleTemp, aux);
+                        t.hook(&t, outTemp + (BLOCKSIZE - outFrames) * t.mMixerChannelCount,
+                                inFrames, state->resampleTemp, aux);
                         t.frameCount -= inFrames;
                         outFrames -= inFrames;
                         if (CC_UNLIKELY(aux != NULL)) {
@@ -1486,10 +1622,11 @@
             }
 
             convertMixerFormat(out, t1.mMixerFormat, outTemp, t1.mMixerInFormat,
-                    BLOCKSIZE * FCC_2);
+                    BLOCKSIZE * t1.mMixerChannelCount);
             // TODO: fix ugly casting due to choice of out pointer type
             out = reinterpret_cast<int32_t*>((uint8_t*)out
-                    + BLOCKSIZE * FCC_2 * audio_bytes_per_sample(t1.mMixerFormat));
+                    + BLOCKSIZE * t1.mMixerChannelCount
+                        * audio_bytes_per_sample(t1.mMixerFormat));
             numFrames += BLOCKSIZE;
         } while (numFrames < state->frameCount);
     }
@@ -1511,8 +1648,6 @@
     ALOGVV("process__genericResampling\n");
     // this const just means that local variable outTemp doesn't change
     int32_t* const outTemp = state->outputTemp;
-    const size_t size = sizeof(int32_t) * MAX_NUM_CHANNELS * state->frameCount;
-
     size_t numFrames = state->frameCount;
 
     uint32_t e0 = state->enabledTracks;
@@ -1533,7 +1668,7 @@
         }
         e0 &= ~(e1);
         int32_t *out = t1.mainBuffer;
-        memset(outTemp, 0, size);
+        memset(outTemp, 0, sizeof(*outTemp) * t1.mMixerChannelCount * state->frameCount);
         while (e1) {
             const int i = 31 - __builtin_clz(e1);
             e1 &= ~(1<<i);
@@ -1565,14 +1700,15 @@
                     if (CC_UNLIKELY(aux != NULL)) {
                         aux += outFrames;
                     }
-                    t.hook(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount,
+                    t.hook(&t, outTemp + outFrames * t.mMixerChannelCount, t.buffer.frameCount,
                             state->resampleTemp, aux);
                     outFrames += t.buffer.frameCount;
                     t.bufferProvider->releaseBuffer(&t.buffer);
                 }
             }
         }
-        convertMixerFormat(out, t1.mMixerFormat, outTemp, t1.mMixerInFormat, numFrames * FCC_2);
+        convertMixerFormat(out, t1.mMixerFormat,
+                outTemp, t1.mMixerInFormat, numFrames * t1.mMixerChannelCount);
     }
 }
 
@@ -1608,7 +1744,7 @@
         // been enabled for mixing.
         if (in == NULL || (((uintptr_t)in) & 3)) {
             memset(out, 0, numFrames
-                    * MAX_NUM_CHANNELS * audio_bytes_per_sample(t.mMixerFormat));
+                    * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
             ALOGE_IF((((uintptr_t)in) & 3), "process stereo track: input buffer alignment pb: "
                                               "buffer %p track %d, channels %d, needs %08x",
                     in, i, t.channelCount, t.needs);
@@ -1780,56 +1916,134 @@
 /*static*/ void AudioMixer::sInitRoutine()
 {
     LocalClock lc;
-    sLocalTimeFreq = lc.getLocalFreq();
+    sLocalTimeFreq = lc.getLocalFreq(); // for the resampler
 
-    // find multichannel downmix effect if we have to play multichannel content
-    uint32_t numEffects = 0;
-    int ret = EffectQueryNumberEffects(&numEffects);
-    if (ret != 0) {
-        ALOGE("AudioMixer() error %d querying number of effects", ret);
-        return;
-    }
-    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
-
-    for (uint32_t i = 0 ; i < numEffects ; i++) {
-        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
-            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
-            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
-                ALOGI("found effect \"%s\" from %s",
-                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
-                sIsMultichannelCapable = true;
-                break;
-            }
-        }
-    }
-    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
+    DownmixerBufferProvider::init(); // for the downmixer
 }
 
-template <int MIXTYPE, int NCHAN, bool USEFLOATVOL, bool ADJUSTVOL,
+/* TODO: consider whether this level of optimization is necessary.
+ * Perhaps just stick with a single for loop.
+ */
+
+// Needs to derive a compile time constant (constexpr).  Could be targeted to go
+// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
+#define MIXTYPE_MONOVOL(mixtype) (mixtype == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
+        mixtype == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : mixtype)
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE,
+        typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
+        const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
+{
+    switch (channels) {
+    case 1:
+        volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 2:
+        volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 3:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 4:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 5:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 6:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 7:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 8:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    }
+}
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE,
+        typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
+        const TI* in, TA* aux, const TV *vol, TAV vola)
+{
+    switch (channels) {
+    case 1:
+        volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 2:
+        volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 3:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 4:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 5:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 6:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 7:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 8:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
+        break;
+    }
+}
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * USEFLOATVOL (set to true if float volume is used)
+ * ADJUSTVOL   (set to true if volume ramp parameters needs adjustment afterwards)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
     typename TO, typename TI, typename TA>
 void AudioMixer::volumeMix(TO *out, size_t outFrames,
         const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t)
 {
     if (USEFLOATVOL) {
         if (ramp) {
-            volumeRampMulti<MIXTYPE, NCHAN>(out, outFrames, in, aux,
+            volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
                     t->mPrevVolume, t->mVolumeInc, &t->prevAuxLevel, t->auxInc);
             if (ADJUSTVOL) {
                 t->adjustVolumeRamp(aux != NULL, true);
             }
         } else {
-            volumeMulti<MIXTYPE, NCHAN>(out, outFrames, in, aux,
+            volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
                     t->mVolume, t->auxLevel);
         }
     } else {
         if (ramp) {
-            volumeRampMulti<MIXTYPE, NCHAN>(out, outFrames, in, aux,
+            volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
                     t->prevVolume, t->volumeInc, &t->prevAuxLevel, t->auxInc);
             if (ADJUSTVOL) {
                 t->adjustVolumeRamp(aux != NULL);
             }
         } else {
-            volumeMulti<MIXTYPE, NCHAN>(out, outFrames, in, aux,
+            volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
                     t->volume, t->auxLevel);
         }
     }
@@ -1838,8 +2052,13 @@
 /* This process hook is called when there is a single track without
  * aux buffer, volume ramp, or resampling.
  * TODO: Update the hook selection: this can properly handle aux and ramp.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
  */
-template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+template <int MIXTYPE, typename TO, typename TI, typename TA>
 void AudioMixer::process_NoResampleOneTrack(state_t* state, int64_t pts)
 {
     ALOGVV("process_NoResampleOneTrack\n");
@@ -1847,6 +2066,7 @@
     const int i = 31 - __builtin_clz(state->enabledTracks);
     ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled");
     track_t *t = &state->tracks[i];
+    const uint32_t channels = t->mMixerChannelCount;
     TO* out = reinterpret_cast<TO*>(t->mainBuffer);
     TA* aux = reinterpret_cast<TA*>(t->auxBuffer);
     const bool ramp = t->needsRamp();
@@ -1863,7 +2083,7 @@
         // been enabled for mixing.
         if (in == NULL || (((uintptr_t)in) & 3)) {
             memset(out, 0, numFrames
-                    * NCHAN * audio_bytes_per_sample(t->mMixerFormat));
+                    * channels * audio_bytes_per_sample(t->mMixerFormat));
             ALOGE_IF((((uintptr_t)in) & 3), "process_NoResampleOneTrack: bus error: "
                     "buffer %p track %p, channels %d, needs %#x",
                     in, t, t->channelCount, t->needs);
@@ -1871,12 +2091,12 @@
         }
 
         const size_t outFrames = b.frameCount;
-        volumeMix<MIXTYPE, NCHAN, is_same<TI, float>::value, false> (out,
-                outFrames, in, aux, ramp, t);
+        volumeMix<MIXTYPE, is_same<TI, float>::value, false> (
+                out, outFrames, in, aux, ramp, t);
 
-        out += outFrames * NCHAN;
+        out += outFrames * channels;
         if (aux != NULL) {
-            aux += NCHAN;
+            aux += channels;
         }
         numFrames -= b.frameCount;
 
@@ -1890,24 +2110,28 @@
 
 /* This track hook is called to do resampling then mixing,
  * pulling from the track's upstream AudioBufferProvider.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
  */
-template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+template <int MIXTYPE, typename TO, typename TI, typename TA>
 void AudioMixer::track__Resample(track_t* t, TO* out, size_t outFrameCount, TO* temp, TA* aux)
 {
     ALOGVV("track__Resample\n");
     t->resampler->setSampleRate(t->sampleRate);
-
     const bool ramp = t->needsRamp();
     if (ramp || aux != NULL) {
         // if ramp:        resample with unity gain to temp buffer and scale/mix in 2nd step.
         // if aux != NULL: resample with unity gain to temp buffer then apply send level.
 
         t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
-        memset(temp, 0, outFrameCount * NCHAN * sizeof(TO));
+        memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(TO));
         t->resampler->resample((int32_t*)temp, outFrameCount, t->bufferProvider);
 
-        volumeMix<MIXTYPE, NCHAN, is_same<TI, float>::value, true>(out, outFrameCount,
-                temp, aux, ramp, t);
+        volumeMix<MIXTYPE, is_same<TI, float>::value, true>(
+                out, outFrameCount, temp, aux, ramp, t);
 
     } else { // constant volume gain
         t->resampler->setVolume(t->mVolume[0], t->mVolume[1]);
@@ -1917,20 +2141,25 @@
 
 /* This track hook is called to mix a track, when no resampling is required.
  * The input buffer should be present in t->in.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
  */
-template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+template <int MIXTYPE, typename TO, typename TI, typename TA>
 void AudioMixer::track__NoResample(track_t* t, TO* out, size_t frameCount,
         TO* temp __unused, TA* aux)
 {
     ALOGVV("track__NoResample\n");
     const TI *in = static_cast<const TI *>(t->in);
 
-    volumeMix<MIXTYPE, NCHAN, is_same<TI, float>::value, true>(out, frameCount,
-            in, aux, t->needsRamp(), t);
+    volumeMix<MIXTYPE, is_same<TI, float>::value, true>(
+            out, frameCount, in, aux, t->needsRamp(), t);
 
     // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
     // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
-    in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * NCHAN;
+    in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * t->mMixerChannelCount;
     t->in = in;
 }
 
@@ -1977,10 +2206,10 @@
 
 /* Returns the proper track hook to use for mixing the track into the output buffer.
  */
-AudioMixer::hook_t AudioMixer::getTrackHook(int trackType, int channels,
+AudioMixer::hook_t AudioMixer::getTrackHook(int trackType, uint32_t channelCount,
         audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
 {
-    if (!kUseNewMixer && channels == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
         switch (trackType) {
         case TRACKTYPE_NOP:
             return track__nop;
@@ -1995,7 +2224,7 @@
             break;
         }
     }
-    LOG_ALWAYS_FATAL_IF(channels != FCC_2); // TODO: must be stereo right now
+    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
     switch (trackType) {
     case TRACKTYPE_NOP:
         return track__nop;
@@ -2003,10 +2232,10 @@
         switch (mixerInFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
             return (AudioMixer::hook_t)
-                    track__Resample<MIXTYPE_MULTI, 2, float, float, int32_t>;
+                    track__Resample<MIXTYPE_MULTI, float /*TO*/, float /*TI*/, int32_t /*TA*/>;
         case AUDIO_FORMAT_PCM_16_BIT:
             return (AudioMixer::hook_t)\
-                    track__Resample<MIXTYPE_MULTI, 2, int32_t, int16_t, int32_t>;
+                    track__Resample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>;
         default:
             LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
             break;
@@ -2016,10 +2245,10 @@
         switch (mixerInFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
             return (AudioMixer::hook_t)
-                    track__NoResample<MIXTYPE_MONOEXPAND, 2, float, float, int32_t>;
+                    track__NoResample<MIXTYPE_MONOEXPAND, float, float, int32_t>;
         case AUDIO_FORMAT_PCM_16_BIT:
             return (AudioMixer::hook_t)
-                    track__NoResample<MIXTYPE_MONOEXPAND, 2, int32_t, int16_t, int32_t>;
+                    track__NoResample<MIXTYPE_MONOEXPAND, int32_t, int16_t, int32_t>;
         default:
             LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
             break;
@@ -2029,10 +2258,10 @@
         switch (mixerInFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
             return (AudioMixer::hook_t)
-                    track__NoResample<MIXTYPE_MULTI, 2, float, float, int32_t>;
+                    track__NoResample<MIXTYPE_MULTI, float, float, int32_t>;
         case AUDIO_FORMAT_PCM_16_BIT:
             return (AudioMixer::hook_t)
-                    track__NoResample<MIXTYPE_MULTI, 2, int32_t, int16_t, int32_t>;
+                    track__NoResample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>;
         default:
             LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
             break;
@@ -2048,25 +2277,25 @@
 /* Returns the proper process hook for mixing tracks. Currently works only for
  * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
  */
-AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, int channels,
+AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, uint32_t channelCount,
         audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
 {
     if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK
         LOG_ALWAYS_FATAL("bad processType: %d", processType);
         return NULL;
     }
-    if (!kUseNewMixer && channels == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
         return process__OneTrack16BitsStereoNoResampling;
     }
-    LOG_ALWAYS_FATAL_IF(channels != FCC_2); // TODO: must be stereo right now
+    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
     switch (mixerInFormat) {
     case AUDIO_FORMAT_PCM_FLOAT:
         switch (mixerOutFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
-            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2,
-                    float, float, int32_t>;
+            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
+                    float /*TO*/, float /*TI*/, int32_t /*TA*/>;
         case AUDIO_FORMAT_PCM_16_BIT:
-            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2,
+            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
                     int16_t, float, int32_t>;
         default:
             LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
@@ -2076,10 +2305,10 @@
     case AUDIO_FORMAT_PCM_16_BIT:
         switch (mixerOutFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
-            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2,
+            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
                     float, int16_t, int32_t>;
         case AUDIO_FORMAT_PCM_16_BIT:
-            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2,
+            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
                     int16_t, int16_t, int32_t>;
         default:
             LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index a9f4761..5ba377b 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -26,7 +26,7 @@
 #include <media/AudioBufferProvider.h>
 #include "AudioResampler.h"
 
-#include <audio_effects/effect_downmix.h>
+#include <hardware/audio_effect.h>
 #include <system/audio.h>
 #include <media/nbaio/NBLog.h>
 
@@ -51,12 +51,11 @@
     static const uint32_t MAX_NUM_TRACKS = 32;
     // maximum number of channels supported by the mixer
 
-    // This mixer has a hard-coded upper limit of 2 channels for output.
-    // There is support for > 2 channel tracks down-mixed to 2 channel output via a down-mix effect.
-    // Adding support for > 2 channel output would require more than simply changing this value.
-    static const uint32_t MAX_NUM_CHANNELS = 2;
+    // This mixer has a hard-coded upper limit of 8 channels for output.
+    static const uint32_t MAX_NUM_CHANNELS = 8;
+    static const uint32_t MAX_NUM_VOLUMES = 2; // stereo volume only
     // maximum number of channels supported for the content
-    static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = 8;
+    static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
 
     static const uint16_t UNITY_GAIN_INT = 0x1000;
     static const float    UNITY_GAIN_FLOAT = 1.0f;
@@ -82,6 +81,7 @@
         AUX_BUFFER      = 0x4003,
         DOWNMIX_TYPE    = 0X4004,
         MIXER_FORMAT    = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+        MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
         // for target RESAMPLE
         SAMPLE_RATE     = 0x4100, // Configure sample rate conversion on this track name;
                                   // parameter 'value' is the new sample rate in Hz.
@@ -153,8 +153,7 @@
 
     struct state_t;
     struct track_t;
-    class DownmixerBufferProvider;
-    class ReformatBufferProvider;
+    class CopyBufferProvider;
 
     typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp,
                            int32_t* aux);
@@ -165,15 +164,15 @@
 
         // TODO: Eventually remove legacy integer volume settings
         union {
-        int16_t     volume[MAX_NUM_CHANNELS]; // U4.12 fixed point (top bit should be zero)
+        int16_t     volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero)
         int32_t     volumeRL;
         };
 
-        int32_t     prevVolume[MAX_NUM_CHANNELS];
+        int32_t     prevVolume[MAX_NUM_VOLUMES];
 
         // 16-byte boundary
 
-        int32_t     volumeInc[MAX_NUM_CHANNELS];
+        int32_t     volumeInc[MAX_NUM_VOLUMES];
         int32_t     auxInc;
         int32_t     prevAuxLevel;
 
@@ -206,9 +205,9 @@
         int32_t*           auxBuffer;
 
         // 16-byte boundary
-        AudioBufferProvider*     mInputBufferProvider;    // 4 bytes
-        ReformatBufferProvider*  mReformatBufferProvider; // 4 bytes
-        DownmixerBufferProvider* downmixerBufferProvider; // 4 bytes
+        AudioBufferProvider*     mInputBufferProvider;    // externally provided buffer provider.
+        CopyBufferProvider*      mReformatBufferProvider; // provider wrapper for reformatting.
+        CopyBufferProvider*      downmixerBufferProvider; // wrapper for channel conversion.
 
         int32_t     sessionId;
 
@@ -218,18 +217,20 @@
         audio_format_t mMixerInFormat;   // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
                                          // each track must be converted to this format.
 
-        float          mVolume[MAX_NUM_CHANNELS];     // floating point set volume
-        float          mPrevVolume[MAX_NUM_CHANNELS]; // floating point previous volume
-        float          mVolumeInc[MAX_NUM_CHANNELS];  // floating point volume increment
+        float          mVolume[MAX_NUM_VOLUMES];     // floating point set volume
+        float          mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
+        float          mVolumeInc[MAX_NUM_VOLUMES];  // floating point volume increment
 
         float          mAuxLevel;                     // floating point set aux level
         float          mPrevAuxLevel;                 // floating point prev aux level
         float          mAuxInc;                       // floating point aux increment
 
         // 16-byte boundary
+        audio_channel_mask_t mMixerChannelMask;
+        uint32_t             mMixerChannelCount;
 
         bool        needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
-        bool        setResampler(uint32_t sampleRate, uint32_t devSampleRate);
+        bool        setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
         bool        doesResample() const { return resampler != NULL; }
         void        resetResampler() { if (resampler != NULL) resampler->reset(); }
         void        adjustVolumeRamp(bool aux, bool useFloat = false);
@@ -253,48 +254,112 @@
         track_t         tracks[MAX_NUM_TRACKS] __attribute__((aligned(32)));
     };
 
-    // AudioBufferProvider that wraps a track AudioBufferProvider by a call to a downmix effect
-    class DownmixerBufferProvider : public AudioBufferProvider {
+    // Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider,
+    // and ReformatBufferProvider.
+    // It handles a private buffer for use in converting format or channel masks from the
+    // input data to a form acceptable by the mixer.
+    // TODO: Make a ResamplerBufferProvider when integers are entirely removed from the
+    // processing pipeline.
+    class CopyBufferProvider : public AudioBufferProvider {
     public:
-        virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
-        virtual void releaseBuffer(Buffer* buffer);
-        DownmixerBufferProvider();
-        virtual ~DownmixerBufferProvider();
+        // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes).
+        // If bufferFrameCount is 0, no private buffer is created and in-place modification of
+        // the upstream buffer provider's buffers is performed by copyFrames().
+        CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize,
+                size_t bufferFrameCount);
+        virtual ~CopyBufferProvider();
 
-        AudioBufferProvider* mTrackBufferProvider;
-        effect_handle_t    mDownmixHandle;
-        effect_config_t    mDownmixConfig;
-    };
-
-    // AudioBufferProvider wrapper that reformats track to acceptable mixer input type
-    class ReformatBufferProvider : public AudioBufferProvider {
-    public:
-        ReformatBufferProvider(int32_t channels,
-                audio_format_t inputFormat, audio_format_t outputFormat);
-        virtual ~ReformatBufferProvider();
-
-        // overrides AudioBufferProvider methods
+        // Overrides AudioBufferProvider methods
         virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
         virtual void releaseBuffer(Buffer* buffer);
 
-        void reset();
-        inline bool requiresInternalBuffers() {
-            return true; //mInputFrameSize < mOutputFrameSize;
+        // Other public methods
+
+        // call this to release the buffer to the upstream provider.
+        // treat it as an audio discontinuity for future samples.
+        virtual void reset();
+
+        // this function should be supplied by the derived class.  It converts
+        // #frames in the *src pointer to the *dst pointer.  It is public because
+        // some providers will allow this to work on arbitrary buffers outside
+        // of the internal buffers.
+        virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
+
+        // set the upstream buffer provider. Consider calling "reset" before this function.
+        void setBufferProvider(AudioBufferProvider *p) {
+            mTrackBufferProvider = p;
         }
 
+    protected:
         AudioBufferProvider* mTrackBufferProvider;
-        int32_t              mChannels;
-        audio_format_t       mInputFormat;
-        audio_format_t       mOutputFormat;
-        size_t               mInputFrameSize;
-        size_t               mOutputFrameSize;
-        // (only) required for reformatting to a larger size.
+        const size_t         mInputFrameSize;
+        const size_t         mOutputFrameSize;
+    private:
         AudioBufferProvider::Buffer mBuffer;
-        void*                mOutputData;
-        size_t               mOutputCount;
+        const size_t         mLocalBufferFrameCount;
+        void*                mLocalBufferData;
         size_t               mConsumed;
     };
 
+    // DownmixerBufferProvider wraps a track AudioBufferProvider to provide
+    // position dependent downmixing by an Audio Effect.
+    class DownmixerBufferProvider : public CopyBufferProvider {
+    public:
+        DownmixerBufferProvider(audio_channel_mask_t inputChannelMask,
+                audio_channel_mask_t outputChannelMask, audio_format_t format,
+                uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount);
+        virtual ~DownmixerBufferProvider();
+        virtual void copyFrames(void *dst, const void *src, size_t frames);
+        bool isValid() const { return mDownmixHandle != NULL; }
+
+        static status_t init();
+        static bool isMultichannelCapable() { return sIsMultichannelCapable; }
+
+    protected:
+        effect_handle_t    mDownmixHandle;
+        effect_config_t    mDownmixConfig;
+
+        // effect descriptor for the downmixer used by the mixer
+        static effect_descriptor_t sDwnmFxDesc;
+        // indicates whether a downmix effect has been found and is usable by this mixer
+        static bool                sIsMultichannelCapable;
+        // FIXME: should we allow effects outside of the framework?
+        // We need to do so here: a special ioId that must be <= -2 so it does not map to a session.
+        static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
+    };
+
+    // RemixBufferProvider wraps a track AudioBufferProvider to perform an
+    // upmix or downmix to the proper channel count and mask.
+    class RemixBufferProvider : public CopyBufferProvider {
+    public:
+        RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+                audio_channel_mask_t outputChannelMask, audio_format_t format,
+                size_t bufferFrameCount);
+        virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+    protected:
+        const audio_format_t mFormat;
+        const size_t         mSampleSize;
+        const size_t         mInputChannels;
+        const size_t         mOutputChannels;
+        int8_t               mIdxAry[sizeof(uint32_t)*8]; // 32 bits => channel indices
+    };
+
+    // ReformatBufferProvider wraps a track AudioBufferProvider to convert the input data
+    // to an acceptable mixer input format type.
+    class ReformatBufferProvider : public CopyBufferProvider {
+    public:
+        ReformatBufferProvider(int32_t channels,
+                audio_format_t inputFormat, audio_format_t outputFormat,
+                size_t bufferFrameCount);
+        virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+    protected:
+        const int32_t        mChannels;
+        const audio_format_t mInputFormat;
+        const audio_format_t mOutputFormat;
+    };
+
     // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc.
     uint32_t        mTrackNames;
 
@@ -310,16 +375,15 @@
 private:
     state_t         mState __attribute__((aligned(32)));
 
-    // effect descriptor for the downmixer used by the mixer
-    static effect_descriptor_t sDwnmFxDesc;
-    // indicates whether a downmix effect has been found and is usable by this mixer
-    static bool                sIsMultichannelCapable;
-
     // Call after changing either the enabled status of a track, or parameters of an enabled track.
     // OK to call more often than that, but unnecessary.
     void invalidateState(uint32_t mask);
 
-    static status_t initTrackDownmix(track_t* pTrack, int trackNum, audio_channel_mask_t mask);
+    bool setChannelMasks(int name,
+            audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
+
+    // TODO: remove unused trackName/trackNum from functions below.
+    static status_t initTrackDownmix(track_t* pTrack, int trackName);
     static status_t prepareTrackForDownmix(track_t* pTrack, int trackNum);
     static void unprepareTrackForDownmix(track_t* pTrack, int trackName);
     static status_t prepareTrackForReformat(track_t* pTrack, int trackNum);
@@ -360,27 +424,26 @@
      * in AudioMixerOps.h).  The template parameters are as follows:
      *
      *   MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
-     *   NCHAN       (number of channels, 2 for now)
      *   USEFLOATVOL (set to true if float volume is used)
      *   ADJUSTVOL   (set to true if volume ramp parameters needs adjustment afterwards)
      *   TO: int32_t (Q4.27) or float
      *   TI: int32_t (Q4.27) or int16_t (Q0.15) or float
      *   TA: int32_t (Q4.27)
      */
-    template <int MIXTYPE, int NCHAN, bool USEFLOATVOL, bool ADJUSTVOL,
+    template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
         typename TO, typename TI, typename TA>
     static void volumeMix(TO *out, size_t outFrames,
             const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t);
 
     // multi-format process hooks
-    template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+    template <int MIXTYPE, typename TO, typename TI, typename TA>
     static void process_NoResampleOneTrack(state_t* state, int64_t pts);
 
     // multi-format track hooks
-    template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+    template <int MIXTYPE, typename TO, typename TI, typename TA>
     static void track__Resample(track_t* t, TO* out, size_t frameCount,
             TO* temp __unused, TA* aux);
-    template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+    template <int MIXTYPE, typename TO, typename TI, typename TA>
     static void track__NoResample(track_t* t, TO* out, size_t frameCount,
             TO* temp __unused, TA* aux);
 
@@ -399,9 +462,9 @@
     };
 
     // functions for determining the proper process and track hooks.
-    static process_hook_t getProcessHook(int processType, int channels,
+    static process_hook_t getProcessHook(int processType, uint32_t channelCount,
             audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
-    static hook_t getTrackHook(int trackType, int channels,
+    static hook_t getTrackHook(int trackType, uint32_t channelCount,
             audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
 };
 
diff --git a/services/audioflinger/AudioMixerOps.h b/services/audioflinger/AudioMixerOps.h
index ad739ff..f7376a8 100644
--- a/services/audioflinger/AudioMixerOps.h
+++ b/services/audioflinger/AudioMixerOps.h
@@ -184,7 +184,7 @@
 template <typename TO, typename TI>
 inline void MixAccum(TO *auxaccum, TI value) {
     if (!is_same<TO, TI>::value) {
-        LOG_ALWAYS_FATAL("MixAccum type not properly specialized: %d %d\n",
+        LOG_ALWAYS_FATAL("MixAccum type not properly specialized: %zu %zu\n",
                 sizeof(TO), sizeof(TI));
     }
     *auxaccum += value;
@@ -230,6 +230,8 @@
     MIXTYPE_MULTI,
     MIXTYPE_MONOEXPAND,
     MIXTYPE_MULTI_SAVEONLY,
+    MIXTYPE_MULTI_MONOVOL,
+    MIXTYPE_MULTI_SAVEONLY_MONOVOL,
 };
 
 /*
@@ -263,6 +265,13 @@
  *   vol: represents a volume array.
  *
  *   MIXTYPE_MULTI_SAVEONLY does not accumulate into the out pointer.
+ *
+ * MIXTYPE_MULTI_MONOVOL:
+ *   Same as MIXTYPE_MULTI, but uses only volume[0].
+ *
+ * MIXTYPE_MULTI_SAVEONLY_MONOVOL:
+ *   Same as MIXTYPE_MULTI_SAVEONLY, but uses only volume[0].
+ *
  */
 
 template <int MIXTYPE, int NCHAN,
@@ -283,12 +292,6 @@
                     vol[i] += volinc[i];
                 }
                 break;
-            case MIXTYPE_MULTI_SAVEONLY:
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
-                    vol[i] += volinc[i];
-                }
-                break;
             case MIXTYPE_MONOEXPAND:
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum);
@@ -296,6 +299,24 @@
                 }
                 in++;
                 break;
+            case MIXTYPE_MULTI_SAVEONLY:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
+                    vol[i] += volinc[i];
+                }
+                break;
+            case MIXTYPE_MULTI_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
+                }
+                vol[0] += volinc[0];
+                break;
+            case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
+                }
+                vol[0] += volinc[0];
+                break;
             default:
                 LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                 break;
@@ -313,12 +334,6 @@
                     vol[i] += volinc[i];
                 }
                 break;
-            case MIXTYPE_MULTI_SAVEONLY:
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
-                    vol[i] += volinc[i];
-                }
-                break;
             case MIXTYPE_MONOEXPAND:
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMul<TO, TI, TV>(*in, vol[i]);
@@ -326,6 +341,24 @@
                 }
                 in++;
                 break;
+            case MIXTYPE_MULTI_SAVEONLY:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
+                    vol[i] += volinc[i];
+                }
+                break;
+            case MIXTYPE_MULTI_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ += MixMul<TO, TI, TV>(*in++, vol[0]);
+                }
+                vol[0] += volinc[0];
+                break;
+            case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMul<TO, TI, TV>(*in++, vol[0]);
+                }
+                vol[0] += volinc[0];
+                break;
             default:
                 LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                 break;
@@ -351,17 +384,27 @@
                     *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                 }
                 break;
-            case MIXTYPE_MULTI_SAVEONLY:
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
-                }
-                break;
             case MIXTYPE_MONOEXPAND:
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum);
                 }
                 in++;
                 break;
+            case MIXTYPE_MULTI_SAVEONLY:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
+                }
+                break;
+            case MIXTYPE_MULTI_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
+                }
+                break;
+            case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
+                }
+                break;
             default:
                 LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                 break;
@@ -377,17 +420,27 @@
                     *out++ += MixMul<TO, TI, TV>(*in++, vol[i]);
                 }
                 break;
-            case MIXTYPE_MULTI_SAVEONLY:
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
-                }
-                break;
             case MIXTYPE_MONOEXPAND:
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMul<TO, TI, TV>(*in, vol[i]);
                 }
                 in++;
                 break;
+            case MIXTYPE_MULTI_SAVEONLY:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
+                }
+                break;
+            case MIXTYPE_MULTI_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ += MixMul<TO, TI, TV>(*in++, vol[0]);
+                }
+                break;
+            case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMul<TO, TI, TV>(*in++, vol[0]);
+                }
+                break;
             default:
                 LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                 break;
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp
index e201ff8..159ab70 100644
--- a/services/audioflinger/AudioResamplerDyn.cpp
+++ b/services/audioflinger/AudioResamplerDyn.cpp
@@ -82,17 +82,17 @@
 void AudioResamplerDyn<TC, TI, TO>::InBuffer::resize(int CHANNELS, int halfNumCoefs)
 {
     // calculate desired state size
-    int stateCount = halfNumCoefs * CHANNELS * 2 * kStateSizeMultipleOfFilterLength;
+    size_t stateCount = halfNumCoefs * CHANNELS * 2 * kStateSizeMultipleOfFilterLength;
 
     // check if buffer needs resizing
     if (mState
             && stateCount == mStateCount
-            && mRingFull-mState == mStateCount-halfNumCoefs*CHANNELS) {
+            && mRingFull-mState == (ssize_t) (mStateCount-halfNumCoefs*CHANNELS)) {
         return;
     }
 
     // create new buffer
-    TI* state;
+    TI* state = NULL;
     (void)posix_memalign(reinterpret_cast<void**>(&state), 32, stateCount*sizeof(*state));
     memset(state, 0, stateCount*sizeof(*state));
 
@@ -213,7 +213,7 @@
 void AudioResamplerDyn<TC, TI, TO>::createKaiserFir(Constants &c,
         double stopBandAtten, int inSampleRate, int outSampleRate, double tbwCheat)
 {
-    TC* buf;
+    TC* buf = NULL;
     static const double atten = 0.9998;   // to avoid ripple overflow
     double fcr;
     double tbw = firKaiserTbw(c.mHalfNumCoefs, stopBandAtten);
diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/services/audioflinger/AudioResamplerFirProcess.h
index bb0f1c9..efc8055 100644
--- a/services/audioflinger/AudioResamplerFirProcess.h
+++ b/services/audioflinger/AudioResamplerFirProcess.h
@@ -109,40 +109,25 @@
     }
 };
 
-/*
- * Helper template functions for interpolating filter coefficients.
- */
-
-template<typename TC, typename T>
-void adjustLerp(T& lerpP __unused)
-{
-}
-
-template<int32_t, typename T>
-void adjustLerp(T& lerpP)
-{
-    lerpP >>= 16;   // lerpP is 32bit for NEON int32_t, but always 16 bit for non-NEON path
-}
-
 template<typename TC, typename TINTERP>
-static inline
+inline
 TC interpolate(TC coef_0, TC coef_1, TINTERP lerp)
 {
     return lerp * (coef_1 - coef_0) + coef_0;
 }
 
-template<int16_t, uint32_t>
-static inline
-int16_t interpolate(int16_t coef_0, int16_t coef_1, uint32_t lerp)
-{
-    return (static_cast<int16_t>(lerp) * ((coef_1-coef_0)<<1)>>16) + coef_0;
+template<>
+inline
+int16_t interpolate<int16_t, uint32_t>(int16_t coef_0, int16_t coef_1, uint32_t lerp)
+{   // in some CPU architectures 16b x 16b multiplies are faster.
+    return (static_cast<int16_t>(lerp) * static_cast<int16_t>(coef_1 - coef_0) >> 15) + coef_0;
 }
 
-template<int32_t, uint32_t>
-static inline
-int32_t interpolate(int32_t coef_0, int32_t coef_1, uint32_t lerp)
+template<>
+inline
+int32_t interpolate<int32_t, uint32_t>(int32_t coef_0, int32_t coef_1, uint32_t lerp)
 {
-    return mulAdd(static_cast<int16_t>(lerp), (coef_1-coef_0)<<1, coef_0);
+    return (lerp * static_cast<int64_t>(coef_1 - coef_0) >> 31) + coef_0;
 }
 
 /* class scope for passing in functions into templates */
@@ -192,7 +177,7 @@
 template <int CHANNELS, int STRIDE, typename TFUNC, typename TC, typename TI, typename TO, typename TINTERP>
 static inline
 void ProcessBase(TO* const out,
-        int count,
+        size_t count,
         const TC* coefsP,
         const TC* coefsN,
         const TI* sP,
@@ -283,7 +268,6 @@
         TINTERP lerpP,
         const TO* const volumeLR)
 {
-    adjustLerp<TC, TINTERP>(lerpP); // coefficient type adjustment for interpolations
     ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP, volumeLR);
 }
 
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index c486630..9e15293 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -55,6 +55,7 @@
     mixer(NULL),
     mSinkBuffer(NULL),
     mSinkBufferSize(0),
+    mSinkChannelCount(FCC_2),
     mMixerBuffer(NULL),
     mMixerBufferSize(0),
     mMixerBufferFormat(AUDIO_FORMAT_PCM_16_BIT),
@@ -71,6 +72,9 @@
     current = &initial;
 
     mDummyDumpState = &dummyDumpState;
+    // TODO: Add channel mask to NBAIO_Format.
+    // We assume that the channel mask must be a valid positional channel mask.
+    mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
 
     unsigned i;
     for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
@@ -148,10 +152,17 @@
         if (outputSink == NULL) {
             format = Format_Invalid;
             sampleRate = 0;
+            mSinkChannelCount = 0;
+            mSinkChannelMask = AUDIO_CHANNEL_NONE;
         } else {
             format = outputSink->format();
             sampleRate = Format_sampleRate(format);
-            ALOG_ASSERT(Format_channelCount(format) == FCC_2);
+            mSinkChannelCount = Format_channelCount(format);
+            LOG_ALWAYS_FATAL_IF(mSinkChannelCount > AudioMixer::MAX_NUM_CHANNELS);
+
+            // TODO: Add channel mask to NBAIO_Format
+            // We assume that the channel mask must be a valid positional channel mask.
+            mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
         }
         dumpState->mSampleRate = sampleRate;
     }
@@ -169,10 +180,12 @@
             //       implementation; it would be better to have normal mixer allocate for us
             //       to avoid blocking here and to prevent possible priority inversion
             mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks);
-            const size_t mixerFrameSize = FCC_2 * audio_bytes_per_sample(mMixerBufferFormat);
+            const size_t mixerFrameSize = mSinkChannelCount
+                    * audio_bytes_per_sample(mMixerBufferFormat);
             mMixerBufferSize = mixerFrameSize * frameCount;
             (void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
-            const size_t sinkFrameSize = FCC_2 * audio_bytes_per_sample(format.mFormat);
+            const size_t sinkFrameSize = mSinkChannelCount
+                    * audio_bytes_per_sample(format.mFormat);
             if (sinkFrameSize > mixerFrameSize) { // need a sink buffer
                 mSinkBufferSize = sinkFrameSize * frameCount;
                 (void)posix_memalign(&mSinkBuffer, 32, mSinkBufferSize);
@@ -244,7 +257,7 @@
                 fastTrackNames[i] = name;
                 mixer->setBufferProvider(name, bufferProvider);
                 mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
-                        (void *) mMixerBuffer);
+                        (void *)mMixerBuffer);
                 // newly allocated track names default to full scale volume
                 mixer->setParameter(
                         name,
@@ -252,6 +265,10 @@
                         AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
                 mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
                         (void *)(uintptr_t)fastTrack->mFormat);
+                mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
+                        (void *)(uintptr_t)fastTrack->mChannelMask);
+                mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
+                        (void *)(uintptr_t)mSinkChannelMask);
                 mixer->enable(name);
             }
             generations[i] = fastTrack->mGeneration;
@@ -286,7 +303,9 @@
                     mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
                             (void *)(uintptr_t)fastTrack->mFormat);
                     mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
-                            (void *)(uintptr_t) fastTrack->mChannelMask);
+                            (void *)(uintptr_t)fastTrack->mChannelMask);
+                    mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
+                            (void *)(uintptr_t)mSinkChannelMask);
                     // already enabled
                 }
                 generations[i] = fastTrack->mGeneration;
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 4671670..fde8c2b 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -66,6 +66,8 @@
     void* mSinkBuffer;                  // used for mixer output format translation
                                         // if sink format is different than mixer output.
     size_t mSinkBufferSize;
+    uint32_t mSinkChannelCount;
+    audio_channel_mask_t mSinkChannelMask;
     void* mMixerBuffer;                 // mixer output buffer.
     size_t mMixerBufferSize;
     audio_format_t mMixerBufferFormat;  // mixer output format: AUDIO_FORMAT_PCM_(16_BIT|FLOAT).
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 6d84296..49422a9 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -142,102 +142,187 @@
     ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
           patch->num_sources, patch->num_sinks, *handle);
     status_t status = NO_ERROR;
-
     audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
-
     sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
     if (audioflinger == 0) {
         return NO_INIT;
     }
+
     if (handle == NULL || patch == NULL) {
         return BAD_VALUE;
     }
-    // limit number of sources to 1 for now
-    if (patch->num_sources == 0 || patch->num_sources > 1 ||
+    // limit number of sources to 1 for now, or 2 sources for the special cross hw module case.
+    // only the audio policy manager can request a patch creation with 2 sources.
+    if (patch->num_sources == 0 || patch->num_sources > 2 ||
             patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
         return BAD_VALUE;
     }
 
-    for (size_t index = 0; *handle != 0 && index < mPatches.size(); index++) {
-        if (*handle == mPatches[index]->mHandle) {
-            ALOGV("createAudioPatch() removing patch handle %d", *handle);
-            halHandle = mPatches[index]->mHalHandle;
-            mPatches.removeAt(index);
-            break;
+    if (*handle != AUDIO_PATCH_HANDLE_NONE) {
+        for (size_t index = 0; *handle != 0 && index < mPatches.size(); index++) {
+            if (*handle == mPatches[index]->mHandle) {
+                ALOGV("createAudioPatch() removing patch handle %d", *handle);
+                halHandle = mPatches[index]->mHalHandle;
+                mPatches.removeAt(index);
+                break;
+            }
         }
     }
 
+    Patch *newPatch = new Patch(patch);
+
     switch (patch->sources[0].type) {
         case AUDIO_PORT_TYPE_DEVICE: {
             // limit number of sinks to 1 for now
             if (patch->num_sinks > 1) {
-                return BAD_VALUE;
+                status = BAD_VALUE;
+                goto exit;
             }
             audio_module_handle_t src_module = patch->sources[0].ext.device.hw_module;
             ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(src_module);
             if (index < 0) {
                 ALOGW("createAudioPatch() bad src hw module %d", src_module);
-                return BAD_VALUE;
+                status = BAD_VALUE;
+                goto exit;
             }
             AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
             for (unsigned int i = 0; i < patch->num_sinks; i++) {
                 // reject connection to different sink types
                 if (patch->sinks[i].type != patch->sinks[0].type) {
                     ALOGW("createAudioPatch() different sink types in same patch not supported");
-                    return BAD_VALUE;
+                    status = BAD_VALUE;
+                    goto exit;
                 }
-                // limit to connections between sinks and sources on same HW module
-                if (patch->sinks[i].ext.mix.hw_module != src_module) {
-                    ALOGW("createAudioPatch() cannot connect source on module %d to "
-                            "sink on module %d", src_module, patch->sinks[i].ext.mix.hw_module);
-                    return BAD_VALUE;
-                }
-
-                // limit to connections between devices and output streams for HAL before 3.0
-                if ((audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) &&
+                // limit to connections between devices and input streams for HAL before 3.0
+                if (patch->sinks[i].ext.mix.hw_module == src_module &&
+                        (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) &&
                         (patch->sinks[i].type != AUDIO_PORT_TYPE_MIX)) {
                     ALOGW("createAudioPatch() invalid sink type %d for device source",
                           patch->sinks[i].type);
-                    return BAD_VALUE;
+                    status = BAD_VALUE;
+                    goto exit;
                 }
             }
 
-            if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-                if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
-                    sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                    patch->sinks[0].ext.mix.handle);
-                    if (thread == 0) {
-                        ALOGW("createAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
-                        return BAD_VALUE;
+            if (patch->sinks[0].ext.device.hw_module != src_module) {
+                // limit to device to device connection if not on same hw module
+                if (patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE) {
+                    ALOGW("createAudioPatch() invalid sink type for cross hw module");
+                    status = INVALID_OPERATION;
+                    goto exit;
+                }
+                // special case num sources == 2 => reuse an existing output mix to connect to the
+                // sink
+                if (patch->num_sources == 2) {
+                    if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
+                            patch->sinks[0].ext.device.hw_module !=
+                                    patch->sources[1].ext.mix.hw_module) {
+                        ALOGW("createAudioPatch() invalid source combination");
+                        status = INVALID_OPERATION;
+                        goto exit;
                     }
-                    status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
+
+                    sp<ThreadBase> thread =
+                            audioflinger->checkPlaybackThread_l(patch->sources[1].ext.mix.handle);
+                    newPatch->mPlaybackThread = (MixerThread *)thread.get();
+                    if (thread == 0) {
+                        ALOGW("createAudioPatch() cannot get playback thread");
+                        status = INVALID_OPERATION;
+                        goto exit;
+                    }
                 } else {
-                    audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-                    status = hwDevice->create_audio_patch(hwDevice,
-                                                           patch->num_sources,
-                                                           patch->sources,
-                                                           patch->num_sinks,
-                                                           patch->sinks,
-                                                           &halHandle);
+                    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+                    audio_devices_t device = patch->sinks[0].ext.device.type;
+                    String8 address = String8(patch->sinks[0].ext.device.address);
+                    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+                    newPatch->mPlaybackThread = audioflinger->openOutput_l(
+                                                             patch->sinks[0].ext.device.hw_module,
+                                                             &output,
+                                                             &config,
+                                                             device,
+                                                             address,
+                                                             AUDIO_OUTPUT_FLAG_NONE);
+                    ALOGV("audioflinger->openOutput_l() returned %p",
+                                          newPatch->mPlaybackThread.get());
+                    if (newPatch->mPlaybackThread == 0) {
+                        status = NO_MEMORY;
+                        goto exit;
+                    }
+                }
+                uint32_t channelCount = newPatch->mPlaybackThread->channelCount();
+                audio_devices_t device = patch->sources[0].ext.device.type;
+                String8 address = String8(patch->sources[0].ext.device.address);
+                audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+                audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount);
+                config.sample_rate = newPatch->mPlaybackThread->sampleRate();
+                config.channel_mask = inChannelMask;
+                config.format = newPatch->mPlaybackThread->format();
+                audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+                newPatch->mRecordThread = audioflinger->openInput_l(src_module,
+                                                                    &input,
+                                                                    &config,
+                                                                    device,
+                                                                    address,
+                                                                    AUDIO_SOURCE_MIC,
+                                                                    AUDIO_INPUT_FLAG_NONE);
+                ALOGV("audioflinger->openInput_l() returned %p inChannelMask %08x",
+                      newPatch->mRecordThread.get(), inChannelMask);
+                if (newPatch->mRecordThread == 0) {
+                    status = NO_MEMORY;
+                    goto exit;
+                }
+                status = createPatchConnections(newPatch, patch);
+                if (status != NO_ERROR) {
+                    goto exit;
                 }
             } else {
-                sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                    patch->sinks[0].ext.mix.handle);
-                if (thread == 0) {
-                    ALOGW("createAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
-                    return BAD_VALUE;
-                }
-                AudioParameter param;
-                param.addInt(String8(AudioParameter::keyRouting),
-                             (int)patch->sources[0].ext.device.type);
-                param.addInt(String8(AudioParameter::keyInputSource),
+                if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
+                    if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
+                        sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
+                                                                        patch->sinks[0].ext.mix.handle);
+                        if (thread == 0) {
+                            ALOGW("createAudioPatch() bad capture I/O handle %d",
+                                                                      patch->sinks[0].ext.mix.handle);
+                            status = BAD_VALUE;
+                            goto exit;
+                        }
+                        status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
+                    } else {
+                        audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
+                        status = hwDevice->create_audio_patch(hwDevice,
+                                                               patch->num_sources,
+                                                               patch->sources,
+                                                               patch->num_sinks,
+                                                               patch->sinks,
+                                                               &halHandle);
+                    }
+                } else {
+                    sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
+                                                                        patch->sinks[0].ext.mix.handle);
+                    if (thread == 0) {
+                        ALOGW("createAudioPatch() bad capture I/O handle %d",
+                                                                      patch->sinks[0].ext.mix.handle);
+                        status = BAD_VALUE;
+                        goto exit;
+                    }
+                    char *address;
+                    if (strcmp(patch->sources[0].ext.device.address, "") != 0) {
+                        address = audio_device_address_to_parameter(
+                                                            patch->sources[0].ext.device.type,
+                                                            patch->sources[0].ext.device.address);
+                    } else {
+                        address = (char *)calloc(1, 1);
+                    }
+                    AudioParameter param = AudioParameter(String8(address));
+                    free(address);
+                    param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING),
+                                 (int)patch->sources[0].ext.device.type);
+                    param.addInt(String8(AUDIO_PARAMETER_STREAM_INPUT_SOURCE),
                                                      (int)patch->sinks[0].ext.mix.usecase.source);
-
-                ALOGV("createAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s",
+                    ALOGV("createAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s",
                                                                       param.toString().string());
-                status = thread->setParameters(param.toString());
+                    status = thread->setParameters(param.toString());
+                }
             }
         } break;
         case AUDIO_PORT_TYPE_MIX: {
@@ -245,18 +330,21 @@
             ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(src_module);
             if (index < 0) {
                 ALOGW("createAudioPatch() bad src hw module %d", src_module);
-                return BAD_VALUE;
+                status = BAD_VALUE;
+                goto exit;
             }
             // limit to connections between devices and output streams
             for (unsigned int i = 0; i < patch->num_sinks; i++) {
                 if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
-                    ALOGW("createAudioPatch() invalid sink type %d for bus source",
+                    ALOGW("createAudioPatch() invalid sink type %d for mix source",
                           patch->sinks[i].type);
-                    return BAD_VALUE;
+                    status = BAD_VALUE;
+                    goto exit;
                 }
                 // limit to connections between sinks and sources on same HW module
                 if (patch->sinks[i].ext.device.hw_module != src_module) {
-                    return BAD_VALUE;
+                    status = BAD_VALUE;
+                    goto exit;
                 }
             }
             AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
@@ -265,7 +353,8 @@
             if (thread == 0) {
                 ALOGW("createAudioPatch() bad playback I/O handle %d",
                           patch->sources[0].ext.mix.handle);
-                return BAD_VALUE;
+                status = BAD_VALUE;
+                goto exit;
             }
             if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
                 status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
@@ -274,27 +363,178 @@
                 for (unsigned int i = 0; i < patch->num_sinks; i++) {
                     type |= patch->sinks[i].ext.device.type;
                 }
-                AudioParameter param;
-                param.addInt(String8(AudioParameter::keyRouting), (int)type);
+                char *address;
+                if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
+                    address = audio_device_address_to_parameter(
+                                                                patch->sinks[0].ext.device.type,
+                                                                patch->sinks[0].ext.device.address);
+                } else {
+                    address = (char *)calloc(1, 1);
+                }
+                AudioParameter param = AudioParameter(String8(address));
+                free(address);
+                param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), (int)type);
                 status = thread->setParameters(param.toString());
             }
 
         } break;
         default:
-            return BAD_VALUE;
+            status = BAD_VALUE;
+            goto exit;
     }
+exit:
     ALOGV("createAudioPatch() status %d", status);
     if (status == NO_ERROR) {
         *handle = audioflinger->nextUniqueId();
-        Patch *newPatch = new Patch(patch);
         newPatch->mHandle = *handle;
         newPatch->mHalHandle = halHandle;
         mPatches.add(newPatch);
         ALOGV("createAudioPatch() added new patch handle %d halHandle %d", *handle, halHandle);
+    } else {
+        clearPatchConnections(newPatch);
+        delete newPatch;
     }
     return status;
 }
 
+status_t AudioFlinger::PatchPanel::createPatchConnections(Patch *patch,
+                                                          const struct audio_patch *audioPatch)
+{
+    // create patch from source device to record thread input
+    struct audio_patch subPatch;
+    subPatch.num_sources = 1;
+    subPatch.sources[0] = audioPatch->sources[0];
+    subPatch.num_sinks = 1;
+
+    patch->mRecordThread->getAudioPortConfig(&subPatch.sinks[0]);
+    subPatch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_MIC;
+
+    status_t status = createAudioPatch(&subPatch, &patch->mRecordPatchHandle);
+    if (status != NO_ERROR) {
+        patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        return status;
+    }
+
+    // create patch from playback thread output to sink device
+    patch->mPlaybackThread->getAudioPortConfig(&subPatch.sources[0]);
+    subPatch.sinks[0] = audioPatch->sinks[0];
+    status = createAudioPatch(&subPatch, &patch->mPlaybackPatchHandle);
+    if (status != NO_ERROR) {
+        patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        return status;
+    }
+
+    // use a pseudo LCM between input and output framecount
+    size_t playbackFrameCount = patch->mPlaybackThread->frameCount();
+    int playbackShift = __builtin_ctz(playbackFrameCount);
+    size_t recordFramecount = patch->mRecordThread->frameCount();
+    int shift = __builtin_ctz(recordFramecount);
+    if (playbackShift < shift) {
+        shift = playbackShift;
+    }
+    size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
+    ALOGV("createPatchConnections() playframeCount %d recordFramecount %d frameCount %d ",
+          playbackFrameCount, recordFramecount, frameCount);
+
+    // create a special record track to capture from record thread
+    uint32_t channelCount = patch->mPlaybackThread->channelCount();
+    audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount);
+    audio_channel_mask_t outChannelMask = patch->mPlaybackThread->channelMask();
+    uint32_t sampleRate = patch->mPlaybackThread->sampleRate();
+    audio_format_t format = patch->mPlaybackThread->format();
+
+    patch->mPatchRecord = new RecordThread::PatchRecord(
+                                             patch->mRecordThread.get(),
+                                             sampleRate,
+                                             inChannelMask,
+                                             format,
+                                             frameCount,
+                                             NULL,
+                                             IAudioFlinger::TRACK_DEFAULT);
+    if (patch->mPatchRecord == 0) {
+        return NO_MEMORY;
+    }
+    status = patch->mPatchRecord->initCheck();
+    if (status != NO_ERROR) {
+        return status;
+    }
+    patch->mRecordThread->addPatchRecord(patch->mPatchRecord);
+
+    // create a special playback track to render to playback thread.
+    // this track is given the same buffer as the PatchRecord buffer
+    patch->mPatchTrack = new PlaybackThread::PatchTrack(
+                                           patch->mPlaybackThread.get(),
+                                           sampleRate,
+                                           outChannelMask,
+                                           format,
+                                           frameCount,
+                                           patch->mPatchRecord->buffer(),
+                                           IAudioFlinger::TRACK_DEFAULT);
+    if (patch->mPatchTrack == 0) {
+        return NO_MEMORY;
+    }
+    status = patch->mPatchTrack->initCheck();
+    if (status != NO_ERROR) {
+        return status;
+    }
+    patch->mPlaybackThread->addPatchTrack(patch->mPatchTrack);
+
+    // tie playback and record tracks together
+    patch->mPatchRecord->setPeerProxy(patch->mPatchTrack.get());
+    patch->mPatchTrack->setPeerProxy(patch->mPatchRecord.get());
+
+    // start capture and playback
+    patch->mPatchRecord->start(AudioSystem::SYNC_EVENT_NONE, 0);
+    patch->mPatchTrack->start();
+
+    return status;
+}
+
+void AudioFlinger::PatchPanel::clearPatchConnections(Patch *patch)
+{
+    sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
+    if (audioflinger == 0) {
+        return;
+    }
+
+    ALOGV("clearPatchConnections() patch->mRecordPatchHandle %d patch->mPlaybackPatchHandle %d",
+          patch->mRecordPatchHandle, patch->mPlaybackPatchHandle);
+
+    if (patch->mPatchRecord != 0) {
+        patch->mPatchRecord->stop();
+    }
+    if (patch->mPatchTrack != 0) {
+        patch->mPatchTrack->stop();
+    }
+    if (patch->mRecordPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
+        releaseAudioPatch(patch->mRecordPatchHandle);
+        patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+    }
+    if (patch->mPlaybackPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
+        releaseAudioPatch(patch->mPlaybackPatchHandle);
+        patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+    }
+    if (patch->mRecordThread != 0) {
+        if (patch->mPatchRecord != 0) {
+            patch->mRecordThread->deletePatchRecord(patch->mPatchRecord);
+            patch->mPatchRecord.clear();
+        }
+        audioflinger->closeInputInternal_l(patch->mRecordThread);
+        patch->mRecordThread.clear();
+    }
+    if (patch->mPlaybackThread != 0) {
+        if (patch->mPatchTrack != 0) {
+            patch->mPlaybackThread->deletePatchTrack(patch->mPatchTrack);
+            patch->mPatchTrack.clear();
+        }
+        // if num sources == 2 we are reusing an existing playback thread so we do not close it
+        if (patch->mAudioPatch.num_sources != 2) {
+            audioflinger->closeOutputInternal_l(patch->mPlaybackThread);
+        }
+        patch->mPlaybackThread.clear();
+    }
+}
+
 /* Disconnect a patch */
 status_t AudioFlinger::PatchPanel::releaseAudioPatch(audio_patch_handle_t handle)
 {
@@ -315,8 +555,10 @@
     if (index == mPatches.size()) {
         return BAD_VALUE;
     }
+    Patch *removedPatch = mPatches[index];
+    mPatches.removeAt(index);
 
-    struct audio_patch *patch = &mPatches[index]->mAudioPatch;
+    struct audio_patch *patch = &removedPatch->mAudioPatch;
 
     switch (patch->sources[0].type) {
         case AUDIO_PORT_TYPE_DEVICE: {
@@ -327,13 +569,20 @@
                 status = BAD_VALUE;
                 break;
             }
+
+            if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE &&
+                    patch->sinks[0].ext.device.hw_module != src_module) {
+                clearPatchConnections(removedPatch);
+                break;
+            }
+
             AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
             if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
                 if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
                     sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
                                                                     patch->sinks[0].ext.mix.handle);
                     if (thread == 0) {
-                        ALOGW("createAudioPatch() bad capture I/O handle %d",
+                        ALOGW("releaseAudioPatch() bad capture I/O handle %d",
                                                                   patch->sinks[0].ext.mix.handle);
                         status = BAD_VALUE;
                         break;
@@ -353,7 +602,7 @@
                     break;
                 }
                 AudioParameter param;
-                param.addInt(String8(AudioParameter::keyRouting), 0);
+                param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
                 ALOGV("releaseAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s",
                                                                       param.toString().string());
                 status = thread->setParameters(param.toString());
@@ -380,7 +629,7 @@
                 status = thread->sendReleaseAudioPatchConfigEvent(mPatches[index]->mHalHandle);
             } else {
                 AudioParameter param;
-                param.addInt(String8(AudioParameter::keyRouting), (int)0);
+                param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
                 status = thread->setParameters(param.toString());
             }
         } break;
@@ -389,8 +638,7 @@
             break;
     }
 
-    delete (mPatches[index]);
-    mPatches.removeAt(index);
+    delete removedPatch;
     return status;
 }
 
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index 7f78621..e31179c 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -21,6 +21,9 @@
 
 class PatchPanel : public RefBase {
 public:
+
+    class Patch;
+
     PatchPanel(const sp<AudioFlinger>& audioFlinger);
     virtual ~PatchPanel();
 
@@ -45,16 +48,31 @@
     /* Set audio port configuration */
     status_t setAudioPortConfig(const struct audio_port_config *config);
 
+    status_t createPatchConnections(Patch *patch,
+                                    const struct audio_patch *audioPatch);
+    void clearPatchConnections(Patch *patch);
+
     class Patch {
     public:
         Patch(const struct audio_patch *patch) :
-            mAudioPatch(*patch), mHandle(0), mHalHandle(0) {}
+            mAudioPatch(*patch), mHandle(AUDIO_PATCH_HANDLE_NONE),
+            mHalHandle(AUDIO_PATCH_HANDLE_NONE), mRecordPatchHandle(AUDIO_PATCH_HANDLE_NONE),
+            mPlaybackPatchHandle(AUDIO_PATCH_HANDLE_NONE) {}
+        ~Patch() {}
 
-        struct audio_patch mAudioPatch;
-        audio_patch_handle_t mHandle;
-        audio_patch_handle_t mHalHandle;
+        struct audio_patch              mAudioPatch;
+        audio_patch_handle_t            mHandle;
+        audio_patch_handle_t            mHalHandle;
+        sp<PlaybackThread>              mPlaybackThread;
+        sp<PlaybackThread::PatchTrack>  mPatchTrack;
+        sp<RecordThread>                mRecordThread;
+        sp<RecordThread::PatchRecord>   mPatchRecord;
+        audio_patch_handle_t            mRecordPatchHandle;
+        audio_patch_handle_t            mPlaybackPatchHandle;
+
     };
+
 private:
-    const wp<AudioFlinger>  mAudioFlinger;
-    SortedVector <Patch *> mPatches;
+    const wp<AudioFlinger>      mAudioFlinger;
+    SortedVector <Patch *>      mPatches;
 };
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 79bdfe8..ee48276 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -29,10 +29,12 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
+                                void *buffer,
                                 const sp<IMemory>& sharedBuffer,
                                 int sessionId,
                                 int uid,
-                                IAudioFlinger::track_flags_t flags);
+                                IAudioFlinger::track_flags_t flags,
+                                track_type type);
     virtual             ~Track();
     virtual status_t    initCheck() const;
 
@@ -100,10 +102,6 @@
     bool isResumePending();
     void resumeAck();
 
-    bool isOutputTrack() const {
-        return (mStreamType == AUDIO_STREAM_CNT);
-    }
-
     sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
 
     // framesWritten is cumulative, never reset, and is shared all tracks
@@ -115,7 +113,6 @@
     void triggerEvents(AudioSystem::sync_event_t type);
     void invalidate();
     bool isInvalid() const { return mIsInvalid; }
-    virtual bool isTimedTrack() const { return false; }
     int fastIndex() const { return mFastIndex; }
 
 protected:
@@ -163,7 +160,6 @@
     bool                mPreviousValid;
     uint32_t            mPreviousFramesWritten;
     AudioTimestamp      mPreviousTimestamp;
-
 };  // end of Track
 
 class TimedTrack : public Track {
@@ -195,7 +191,6 @@
     };
 
     // Mixer facing methods.
-    virtual bool isTimedTrack() const { return true; }
     virtual size_t framesReady() const;
 
     // AudioBufferProvider interface
@@ -296,3 +291,34 @@
     DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
     AudioTrackClientProxy*      mClientProxy;
 };  // end of OutputTrack
+
+// playback track, used by PatchPanel
+class PatchTrack : public Track, public PatchProxyBufferProvider {
+public:
+
+                        PatchTrack(PlaybackThread *playbackThread,
+                                   uint32_t sampleRate,
+                                   audio_channel_mask_t channelMask,
+                                   audio_format_t format,
+                                   size_t frameCount,
+                                   void *buffer,
+                                   IAudioFlinger::track_flags_t flags);
+    virtual             ~PatchTrack();
+
+    // AudioBufferProvider interface
+    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+                                   int64_t pts);
+    virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+
+    // PatchProxyBufferProvider interface
+    virtual status_t    obtainBuffer(Proxy::Buffer* buffer,
+                                     const struct timespec *timeOut = NULL);
+    virtual void        releaseBuffer(Proxy::Buffer* buffer);
+
+            void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
+
+private:
+    sp<ClientProxy>             mProxy;
+    PatchProxyBufferProvider*   mPeerProxy;
+    struct timespec             mPeerTimeout;
+};  // end of PatchTrack
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index fe15571..204a9d6 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -28,9 +28,11 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
+                                void *buffer,
                                 int sessionId,
                                 int uid,
-                                IAudioFlinger::track_flags_t flags);
+                                IAudioFlinger::track_flags_t flags,
+                                track_type type);
     virtual             ~RecordTrack();
 
     virtual status_t    start(AudioSystem::sync_event_t event, int triggerSession);
@@ -93,3 +95,34 @@
             // used by resampler to find source frames
             ResamplerBufferProvider *mResamplerBufferProvider;
 };
+
+// record track, used by PatchPanel
+class PatchRecord : virtual public RecordTrack, public PatchProxyBufferProvider {
+public:
+
+    PatchRecord(RecordThread *recordThread,
+                uint32_t sampleRate,
+                audio_channel_mask_t channelMask,
+                audio_format_t format,
+                size_t frameCount,
+                void *buffer,
+                IAudioFlinger::track_flags_t flags);
+    virtual             ~PatchRecord();
+
+    // AudioBufferProvider interface
+    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+                                   int64_t pts);
+    virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+
+    // PatchProxyBufferProvider interface
+    virtual status_t    obtainBuffer(Proxy::Buffer *buffer,
+                                     const struct timespec *timeOut = NULL);
+    virtual void        releaseBuffer(Proxy::Buffer *buffer);
+
+    void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
+
+private:
+    sp<ClientProxy>             mProxy;
+    PatchProxyBufferProvider*   mPeerProxy;
+    struct timespec             mPeerTimeout;
+};  // end of PatchRecord
diff --git a/services/audioflinger/StateQueue.cpp b/services/audioflinger/StateQueue.cpp
index 7e01c9f..40d7bcd 100644
--- a/services/audioflinger/StateQueue.cpp
+++ b/services/audioflinger/StateQueue.cpp
@@ -41,13 +41,14 @@
 // Constructor and destructor
 
 template<typename T> StateQueue<T>::StateQueue() :
-    mNext(NULL), mAck(NULL), mCurrent(NULL),
+    mAck(NULL), mCurrent(NULL),
     mMutating(&mStates[0]), mExpecting(NULL),
     mInMutation(false), mIsDirty(false), mIsInitialized(false)
 #ifdef STATE_QUEUE_DUMP
     , mObserverDump(&mObserverDummyDump), mMutatorDump(&mMutatorDummyDump)
 #endif
 {
+    atomic_init(&mNext, 0);
 }
 
 template<typename T> StateQueue<T>::~StateQueue()
@@ -58,11 +59,8 @@
 
 template<typename T> const T* StateQueue<T>::poll()
 {
-#ifdef __LP64__
-    const T *next = (const T *) android_atomic_acquire_load64((volatile int64_t *) &mNext);
-#else
-    const T *next = (const T *) android_atomic_acquire_load((volatile int32_t *) &mNext);
-#endif
+    const T *next = (const T *) atomic_load_explicit(&mNext, memory_order_acquire);
+
     if (next != mCurrent) {
         mAck = next;    // no additional barrier needed
         mCurrent = next;
@@ -144,11 +142,7 @@
         }
 
         // publish
-#ifdef __LP64__
-        android_atomic_release_store64((int64_t) mMutating, (volatile int64_t *) &mNext);
-#else
-        android_atomic_release_store((int32_t) mMutating, (volatile int32_t *) &mNext);
-#endif
+        atomic_store_explicit(&mNext, (uintptr_t)mMutating, memory_order_release);
         mExpecting = mMutating;
 
         // copy with circular wraparound
diff --git a/services/audioflinger/StateQueue.h b/services/audioflinger/StateQueue.h
index 9e176c4..27f6a28 100644
--- a/services/audioflinger/StateQueue.h
+++ b/services/audioflinger/StateQueue.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_AUDIO_STATE_QUEUE_H
 #define ANDROID_AUDIO_STATE_QUEUE_H
 
+#include <stdatomic.h>
+
 // The state queue template class was originally driven by this use case / requirements:
 //  There are two threads: a fast mixer, and a normal mixer, and they share state.
 //  The interesting part of the shared state is a set of active fast tracks,
@@ -186,7 +188,7 @@
     T                 mStates[kN];      // written by mutator, read by observer
 
     // "volatile" is meaningless with SMP, but here it indicates that we're using atomic ops
-    volatile const T* mNext; // written by mutator to advance next, read by observer
+    atomic_uintptr_t  mNext; // written by mutator to advance next, read by observer
     volatile const T* mAck;  // written by observer to acknowledge advance of next, read by mutator
 
     // only used by observer
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
old mode 100755
new mode 100644
index 0f01b02..2e2f533
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -167,7 +167,7 @@
 // Initially this heap is used to allocate client buffers for "fast" AudioRecord.
 // Eventually it will be the single buffer that FastCapture writes into via HAL read(),
 // and that all "fast" AudioRecord clients read from.  In either case, the size can be small.
-static const size_t kRecordThreadReadOnlyHeapSize = 0x1000;
+static const size_t kRecordThreadReadOnlyHeapSize = 0x2000;
 
 // ----------------------------------------------------------------------------
 
@@ -910,6 +910,15 @@
         goto Exit;
     }
 
+    // Reject any effect on multichannel sinks.
+    // TODO: fix both format and multichannel issues with effects.
+    if (mChannelCount != FCC_2) {
+        ALOGW("createEffect_l() Cannot add effect %s for multichannel(%d) thread",
+                desc->name, mChannelCount);
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
     // Allow global effects only on offloaded and mixer threads
     if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
         switch (mType) {
@@ -1146,6 +1155,18 @@
     }
 }
 
+void AudioFlinger::ThreadBase::getAudioPortConfig(struct audio_port_config *config)
+{
+    config->type = AUDIO_PORT_TYPE_MIX;
+    config->ext.mix.handle = mId;
+    config->sample_rate = mSampleRate;
+    config->format = mFormat;
+    config->channel_mask = mChannelMask;
+    config->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
+                            AUDIO_PORT_CONFIG_FORMAT;
+}
+
+
 // ----------------------------------------------------------------------------
 //      Playback
 // ----------------------------------------------------------------------------
@@ -1376,9 +1397,10 @@
             ) &&
             // PCM data
             audio_is_linear_pcm(format) &&
-            // mono or stereo
-            ( (channelMask == AUDIO_CHANNEL_OUT_MONO) ||
-              (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) &&
+            // identical channel mask to sink, or mono in and stereo sink
+            (channelMask == mChannelMask ||
+                    (channelMask == AUDIO_CHANNEL_OUT_MONO &&
+                            mChannelMask == AUDIO_CHANNEL_OUT_STEREO)) &&
             // hardware sample rate
             (sampleRate == mSampleRate) &&
             // normal mixer has an associated fast mixer
@@ -1482,7 +1504,7 @@
         uint32_t strategy = AudioSystem::getStrategyForStream(streamType);
         for (size_t i = 0; i < mTracks.size(); ++i) {
             sp<Track> t = mTracks[i];
-            if (t != 0 && !t->isOutputTrack()) {
+            if (t != 0 && t->isExternalTrack()) {
                 uint32_t actual = AudioSystem::getStrategyForStream(t->streamType());
                 if (sessionId == t->sessionId() && strategy != actual) {
                     ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
@@ -1495,7 +1517,8 @@
 
         if (!isTimed) {
             track = new Track(this, client, streamType, sampleRate, format,
-                    channelMask, frameCount, sharedBuffer, sessionId, uid, *flags);
+                              channelMask, frameCount, NULL, sharedBuffer,
+                              sessionId, uid, *flags, TrackBase::TYPE_DEFAULT);
         } else {
             track = TimedTrack::create(this, client, streamType, sampleRate, format,
                     channelMask, frameCount, sharedBuffer, sessionId, uid);
@@ -1608,7 +1631,7 @@
         // the track is newly added, make sure it fills up all its
         // buffers before playing. This is to ensure the client will
         // effectively get the latency it requested.
-        if (!track->isOutputTrack()) {
+        if (track->isExternalTrack()) {
             TrackBase::track_state state = track->mState;
             mLock.unlock();
             status = AudioSystem::startOutput(mId, track->streamType(), track->sessionId());
@@ -1801,9 +1824,10 @@
     if (!audio_is_output_channel(mChannelMask)) {
         LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
     }
-    if ((mType == MIXER || mType == DUPLICATING) && mChannelMask != AUDIO_CHANNEL_OUT_STEREO) {
-        LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output; "
-                "must be AUDIO_CHANNEL_OUT_STEREO", mChannelMask);
+    if ((mType == MIXER || mType == DUPLICATING)
+            && !isValidPcmSinkChannelMask(mChannelMask)) {
+        LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output",
+                mChannelMask);
     }
     mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
     mHALFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
@@ -2044,7 +2068,7 @@
     if (count > 0) {
         for (size_t i = 0 ; i < count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
-            if (!track->isOutputTrack()) {
+            if (track->isExternalTrack()) {
                 AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
 #ifdef ADD_BATTERY_DATA
                 // to track the speaker usage
@@ -2713,6 +2737,26 @@
     return status;
 }
 
+void AudioFlinger::PlaybackThread::addPatchTrack(const sp<PatchTrack>& track)
+{
+    Mutex::Autolock _l(mLock);
+    mTracks.add(track);
+}
+
+void AudioFlinger::PlaybackThread::deletePatchTrack(const sp<PatchTrack>& track)
+{
+    Mutex::Autolock _l(mLock);
+    destroyTrack_l(track);
+}
+
+void AudioFlinger::PlaybackThread::getAudioPortConfig(struct audio_port_config *config)
+{
+    ThreadBase::getAudioPortConfig(config);
+    config->role = AUDIO_PORT_ROLE_SOURCE;
+    config->ext.mix.hw_module = mOutput->audioHwDev->handle();
+    config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
+}
+
 // ----------------------------------------------------------------------------
 
 AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
@@ -2732,11 +2776,6 @@
             mNormalFrameCount);
     mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
 
-    // FIXME - Current mixer implementation only supports stereo output
-    if (mChannelCount != FCC_2) {
-        ALOGE("Invalid audio hardware channel count %d", mChannelCount);
-    }
-
     // create an NBAIO sink for the HAL output stream, and negotiate
     mOutputSink = new AudioStreamOutSink(output->stream);
     size_t numCounterOffers = 0;
@@ -3459,6 +3498,10 @@
                 name,
                 AudioMixer::TRACK,
                 AudioMixer::CHANNEL_MASK, (void *)(uintptr_t)track->channelMask());
+            mAudioMixer->setParameter(
+                name,
+                AudioMixer::TRACK,
+                AudioMixer::MIXER_CHANNEL_MASK, (void *)(uintptr_t)mChannelMask);
             // limit track sample rate to 2 x output sample rate, which changes at re-configuration
             uint32_t maxSampleRate = mSampleRate * 2;
             uint32_t reqSampleRate = track->mAudioTrackServerProxy->getSampleRate();
@@ -3643,7 +3686,7 @@
             memset(mEffectBuffer, 0, mEffectBufferSize);
         }
         // FIXME as a performance optimization, should remember previous zero status
-        memset(mSinkBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
+        memset(mSinkBuffer, 0, mNormalFrameCount * mFrameSize);
     }
 
     // if any fast tracks, then status is ready
@@ -3697,7 +3740,7 @@
         reconfig = true;
     }
     if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
-        if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) {
+        if (!isValidPcmSinkFormat((audio_format_t) value)) {
             status = BAD_VALUE;
         } else {
             // no need to save value, since it's constant
@@ -3705,7 +3748,7 @@
         }
     }
     if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
-        if ((audio_channel_mask_t) value != AUDIO_CHANNEL_OUT_STEREO) {
+        if (!isValidPcmSinkChannelMask((audio_channel_mask_t) value)) {
             status = BAD_VALUE;
         } else {
             // no need to save value, since it's constant
@@ -4783,7 +4826,7 @@
     , mPipeFramesP2(0)
     // mPipeMemory
     // mFastCaptureNBLogWriter
-    , mFastTrackAvail(true)
+    , mFastTrackAvail(false)
 {
     snprintf(mName, kNameLength, "AudioIn_%X", id);
     mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
@@ -4819,8 +4862,8 @@
                 // or primary output sample rate is unknown, and capture sample rate is reasonable
                 ((primaryOutputSampleRate == 0) &&
                     ((mSampleRate == 44100 || mSampleRate == 48000)))) &&
-                // and the buffer size is < 10 ms
-                (mFrameCount * 1000) / mSampleRate < 10;
+                // and the buffer size is < 12 ms
+                (mFrameCount * 1000) / mSampleRate < 12;
         break;
     // case FastCapture_Dynamic:
     }
@@ -4895,6 +4938,7 @@
         // FIXME
 #endif
 
+        mFastTrackAvail = true;
     }
 failed: ;
 
@@ -5253,11 +5297,13 @@
                     //       to keep mRsmpInBuffer full so resampler always has sufficient input
                     size_t framesInNeeded;
                     // FIXME only re-calculate when it changes, and optimize for common ratios
-                    double inOverOut = (double) mSampleRate / activeTrack->mSampleRate;
-                    double outOverIn = (double) activeTrack->mSampleRate / mSampleRate;
-                    framesInNeeded = ceil(framesOut * inOverOut) + 1;
+                    // Do not precompute in/out because floating point is not associative
+                    // e.g. a*b/c != a*(b/c).
+                    const double in(mSampleRate);
+                    const double out(activeTrack->mSampleRate);
+                    framesInNeeded = ceil(framesOut * in / out) + 1;
                     ALOGV("need %u frames in to produce %u out given in/out ratio of %.4g",
-                                framesInNeeded, framesOut, inOverOut);
+                                framesInNeeded, framesOut, in / out);
                     // Although we theoretically have framesIn in circular buffer, some of those are
                     // unreleased frames, and thus must be discounted for purpose of budgeting.
                     size_t unreleased = activeTrack->mRsmpInUnrel;
@@ -5265,24 +5311,24 @@
                     if (framesIn < framesInNeeded) {
                         ALOGV("not enough to resample: have %u frames in but need %u in to "
                                 "produce %u out given in/out ratio of %.4g",
-                                framesIn, framesInNeeded, framesOut, inOverOut);
-                        size_t newFramesOut = framesIn > 0 ? floor((framesIn - 1) * outOverIn) : 0;
+                                framesIn, framesInNeeded, framesOut, in / out);
+                        size_t newFramesOut = framesIn > 0 ? floor((framesIn - 1) * out / in) : 0;
                         LOG_ALWAYS_FATAL_IF(newFramesOut >= framesOut);
                         if (newFramesOut == 0) {
                             break;
                         }
-                        framesInNeeded = ceil(newFramesOut * inOverOut) + 1;
+                        framesInNeeded = ceil(newFramesOut * in / out) + 1;
                         ALOGV("now need %u frames in to produce %u out given out/in ratio of %.4g",
-                                framesInNeeded, newFramesOut, outOverIn);
+                                framesInNeeded, newFramesOut, out / in);
                         LOG_ALWAYS_FATAL_IF(framesIn < framesInNeeded);
                         ALOGV("success 2: have %u frames in and need %u in to produce %u out "
                               "given in/out ratio of %.4g",
-                              framesIn, framesInNeeded, newFramesOut, inOverOut);
+                              framesIn, framesInNeeded, newFramesOut, in / out);
                         framesOut = newFramesOut;
                     } else {
                         ALOGV("success 1: have %u in and need %u in to produce %u out "
                             "given in/out ratio of %.4g",
-                            framesIn, framesInNeeded, framesOut, inOverOut);
+                            framesIn, framesInNeeded, framesOut, in / out);
                     }
 
                     // reallocate mRsmpOutBuffer as needed; we will grow but never shrink
@@ -5457,21 +5503,14 @@
     // client expresses a preference for FAST, but we get the final say
     if (*flags & IAudioFlinger::TRACK_FAST) {
       if (
-            // use case: callback handler and frame count is default or at least as large as HAL
-            (
-                (tid != -1) &&
-                ((frameCount == 0) /*||
-                // FIXME must be equal to pipe depth, so don't allow it to be specified by client
-                // FIXME not necessarily true, should be native frame count for native SR!
-                (frameCount >= mFrameCount)*/)
-            ) &&
+            // use case: callback handler
+            (tid != -1) &&
+            // frame count is not specified, or is exactly the pipe depth
+            ((frameCount == 0) || (frameCount == mPipeFramesP2)) &&
             // PCM data
             audio_is_linear_pcm(format) &&
             // native format
             (format == mFormat) &&
-            // mono or stereo
-            ( (channelMask == AUDIO_CHANNEL_IN_MONO) ||
-              (channelMask == AUDIO_CHANNEL_IN_STEREO) ) &&
             // native channel mask
             (channelMask == mChannelMask) &&
             // native hardware sample rate
@@ -5481,40 +5520,43 @@
             // there are sufficient fast track slots available
             mFastTrackAvail
         ) {
-        // if frameCount not specified, then it defaults to pipe frame count
-        if (frameCount == 0) {
-            frameCount = mPipeFramesP2;
-        }
-        ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
+        ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%u mFrameCount=%u",
                 frameCount, mFrameCount);
       } else {
-        ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%d "
-                "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
+        ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%u mFrameCount=%u mPipeFramesP2=%u "
+                "format=%#x isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
                 "hasFastCapture=%d tid=%d mFastTrackAvail=%d",
-                frameCount, mFrameCount, format,
-                audio_is_linear_pcm(format),
-                channelMask, sampleRate, mSampleRate, hasFastCapture(), tid, mFastTrackAvail);
+                frameCount, mFrameCount, mPipeFramesP2,
+                format, audio_is_linear_pcm(format), channelMask, sampleRate, mSampleRate,
+                hasFastCapture(), tid, mFastTrackAvail);
         *flags &= ~IAudioFlinger::TRACK_FAST;
-        // FIXME It's not clear that we need to enforce this any more, since we have a pipe.
-        // For compatibility with AudioRecord calculation, buffer depth is forced
-        // to be at least 2 x the record thread frame count and cover audio hardware latency.
-        // This is probably too conservative, but legacy application code may depend on it.
-        // If you change this calculation, also review the start threshold which is related.
-        // FIXME It's not clear how input latency actually matters.  Perhaps this should be 0.
-        uint32_t latencyMs = 50; // FIXME mInput->stream->get_latency(mInput->stream);
-        size_t mNormalFrameCount = 2048; // FIXME
-        uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
-        if (minBufCount < 2) {
-            minBufCount = 2;
-        }
-        size_t minFrameCount = mNormalFrameCount * minBufCount;
+      }
+    }
+
+    // compute track buffer size in frames, and suggest the notification frame count
+    if (*flags & IAudioFlinger::TRACK_FAST) {
+        // fast track: frame count is exactly the pipe depth
+        frameCount = mPipeFramesP2;
+        // ignore requested notificationFrames, and always notify exactly once every HAL buffer
+        *notificationFrames = mFrameCount;
+    } else {
+        // not fast track: frame count is at least 2 HAL buffers and at least 20 ms
+        size_t minFrameCount = ((int64_t) mFrameCount * 2 * sampleRate + mSampleRate - 1) /
+                mSampleRate;
         if (frameCount < minFrameCount) {
             frameCount = minFrameCount;
         }
-      }
+        minFrameCount = (sampleRate * 20 / 1000 + 1) & ~1;
+        if (frameCount < minFrameCount) {
+            frameCount = minFrameCount;
+        }
+        // notification is forced to be at least double-buffering
+        size_t maxNotification = frameCount / 2;
+        if (*notificationFrames == 0 || *notificationFrames > maxNotification) {
+            *notificationFrames = maxNotification;
+        }
     }
     *pFrameCount = frameCount;
-    *notificationFrames = 0;    // FIXME implement
 
     lStatus = initCheck();
     if (lStatus != NO_ERROR) {
@@ -5526,8 +5568,8 @@
         Mutex::Autolock _l(mLock);
 
         track = new RecordTrack(this, client, sampleRate,
-                      format, channelMask, frameCount, sessionId, uid,
-                      *flags);
+                      format, channelMask, frameCount, NULL, sessionId, uid,
+                      *flags, TrackBase::TYPE_DEFAULT);
 
         lStatus = track->initCheck();
         if (lStatus != NO_ERROR) {
@@ -5604,15 +5646,19 @@
         recordTrack->mState = TrackBase::STARTING_1;
         mActiveTracks.add(recordTrack);
         mActiveTracksGen++;
-        mLock.unlock();
-        status_t status = AudioSystem::startInput(mId);
-        mLock.lock();
-        // FIXME should verify that recordTrack is still in mActiveTracks
-        if (status != NO_ERROR) {
-            mActiveTracks.remove(recordTrack);
-            mActiveTracksGen++;
-            recordTrack->clearSyncStartEvent();
-            return status;
+        status_t status = NO_ERROR;
+        if (recordTrack->isExternalTrack()) {
+            mLock.unlock();
+            status = AudioSystem::startInput(mId, (audio_session_t)recordTrack->sessionId());
+            mLock.lock();
+            // FIXME should verify that recordTrack is still in mActiveTracks
+            if (status != NO_ERROR) {
+                mActiveTracks.remove(recordTrack);
+                mActiveTracksGen++;
+                recordTrack->clearSyncStartEvent();
+                ALOGV("RecordThread::start error %d", status);
+                return status;
+            }
         }
         // Catch up with current buffer indices if thread is already running.
         // This is what makes a new client discard all buffered data.  If the track's mRsmpInFront
@@ -5637,7 +5683,9 @@
     }
 
 startError:
-    AudioSystem::stopInput(mId);
+    if (recordTrack->isExternalTrack()) {
+        AudioSystem::stopInput(mId, (audio_session_t)recordTrack->sessionId());
+    }
     recordTrack->clearSyncStartEvent();
     // FIXME I wonder why we do not reset the state here?
     return status;
@@ -5745,6 +5793,7 @@
     } else {
         dprintf(fd, "  No active record clients\n");
     }
+    dprintf(fd, "  Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
     dprintf(fd, "  Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
 
     dumpBase(fd, args);
@@ -6179,5 +6228,24 @@
     return status;
 }
 
+void AudioFlinger::RecordThread::addPatchRecord(const sp<PatchRecord>& record)
+{
+    Mutex::Autolock _l(mLock);
+    mTracks.add(record);
+}
+
+void AudioFlinger::RecordThread::deletePatchRecord(const sp<PatchRecord>& record)
+{
+    Mutex::Autolock _l(mLock);
+    destroyTrack_l(record);
+}
+
+void AudioFlinger::RecordThread::getAudioPortConfig(struct audio_port_config *config)
+{
+    ThreadBase::getAudioPortConfig(config);
+    config->role = AUDIO_PORT_ROLE_SINK;
+    config->ext.mix.hw_module = mInput->audioHwDev->handle();
+    config->ext.mix.usecase.source = mAudioSource;
+}
 
 }; // namespace android
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 3b7257b..648502b 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -235,6 +235,7 @@
                 uint32_t    sampleRate() const { return mSampleRate; }
                 audio_channel_mask_t channelMask() const { return mChannelMask; }
                 audio_format_t format() const { return mHALFormat; }
+                uint32_t channelCount() const { return mChannelCount; }
                 // Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
                 // and returns the [normal mix] buffer's frame count.
     virtual     size_t      frameCount() const = 0;
@@ -264,6 +265,7 @@
     virtual     status_t    createAudioPatch_l(const struct audio_patch *patch,
                                                audio_patch_handle_t *handle) = 0;
     virtual     status_t    releaseAudioPatch_l(const audio_patch_handle_t handle) = 0;
+    virtual     void        getAudioPortConfig(struct audio_port_config *config) = 0;
 
 
                 // see note at declaration of mStandby, mOutDevice and mInDevice
@@ -589,7 +591,12 @@
                 // Return's the HAL's frame count i.e. fast mixer buffer size.
                 size_t      frameCountHAL() const { return mFrameCount; }
 
-                status_t         getTimestamp_l(AudioTimestamp& timestamp);
+                status_t    getTimestamp_l(AudioTimestamp& timestamp);
+
+                void        addPatchTrack(const sp<PatchTrack>& track);
+                void        deletePatchTrack(const sp<PatchTrack>& track);
+
+    virtual     void        getAudioPortConfig(struct audio_port_config *config);
 
 protected:
     // updated by readOutputParameters_l()
@@ -876,6 +883,7 @@
                               ALOG_ASSERT(fastIndex < FastMixerState::kMaxFastTracks);
                               return mFastMixerDumpState.mTracks[fastIndex].mUnderruns;
                             }
+
 };
 
 class DirectOutputThread : public PlaybackThread {
@@ -1103,6 +1111,10 @@
     virtual status_t    createAudioPatch_l(const struct audio_patch *patch,
                                            audio_patch_handle_t *handle);
     virtual status_t    releaseAudioPatch_l(const audio_patch_handle_t handle);
+
+            void        addPatchRecord(const sp<PatchRecord>& record);
+            void        deletePatchRecord(const sp<PatchRecord>& record);
+
             void        readInputParameters_l();
     virtual uint32_t    getInputFramesLost();
 
@@ -1122,6 +1134,7 @@
 
     virtual size_t      frameCount() const { return mFrameCount; }
             bool        hasFastCapture() const { return mFastCapture != 0; }
+    virtual void        getAudioPortConfig(struct audio_port_config *config);
 
 private:
             // Enter standby if not already in standby, and set mStandby flag
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 4cba3fd..864daa5 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -44,6 +44,15 @@
         ALLOC_CBLK,     // allocate immediately after control block
         ALLOC_READONLY, // allocate from a separate read-only heap per thread
         ALLOC_PIPE,     // do not allocate; use the pipe buffer
+        ALLOC_LOCAL,    // allocate a local buffer
+        ALLOC_NONE,     // do not allocate:use the buffer passed to TrackBase constructor
+    };
+
+    enum track_type {
+        TYPE_DEFAULT,
+        TYPE_TIMED,
+        TYPE_OUTPUT,
+        TYPE_PATCH,
     };
 
                         TrackBase(ThreadBase *thread,
@@ -52,14 +61,15 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
-                                const sp<IMemory>& sharedBuffer,
+                                void *buffer,
                                 int sessionId,
                                 int uid,
                                 IAudioFlinger::track_flags_t flags,
                                 bool isOut,
-                                alloc_type alloc = ALLOC_CBLK);
+                                alloc_type alloc = ALLOC_CBLK,
+                                track_type type = TYPE_DEFAULT);
     virtual             ~TrackBase();
-    virtual status_t    initCheck() const { return getCblk() != 0 ? NO_ERROR : NO_MEMORY; }
+    virtual status_t    initCheck() const;
 
     virtual status_t    start(AudioSystem::sync_event_t event,
                              int triggerSession) = 0;
@@ -71,7 +81,12 @@
     virtual status_t    setSyncEvent(const sp<SyncEvent>& event);
 
             sp<IMemory> getBuffers() const { return mBufferMemory; }
+            void*       buffer() const { return mBuffer; }
             bool        isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; }
+            bool        isTimedTrack() const { return (mType == TYPE_TIMED); }
+            bool        isOutputTrack() const { return (mType == TYPE_OUTPUT); }
+            bool        isPatchTrack() const { return (mType == TYPE_PATCH); }
+            bool        isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); }
 
 protected:
                         TrackBase(const TrackBase&);
@@ -150,4 +165,18 @@
     sp<NBAIO_Sink>      mTeeSink;
     sp<NBAIO_Source>    mTeeSource;
     bool                mTerminated;
+    track_type          mType;      // must be one of TYPE_DEFAULT, TYPE_OUTPUT, TYPE_PATCH ...
+};
+
+// PatchProxyBufferProvider interface is implemented by PatchTrack and PatchRecord.
+// it provides buffer access methods that map those of a ClientProxy (see AudioTrackShared.h)
+class PatchProxyBufferProvider
+{
+public:
+
+    virtual ~PatchProxyBufferProvider() {}
+
+    virtual status_t    obtainBuffer(Proxy::Buffer* buffer,
+                                     const struct timespec *requested = NULL) = 0;
+    virtual void        releaseBuffer(Proxy::Buffer* buffer) = 0;
 };
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index cacb066..48093da 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -68,12 +68,13 @@
             audio_format_t format,
             audio_channel_mask_t channelMask,
             size_t frameCount,
-            const sp<IMemory>& sharedBuffer,
+            void *buffer,
             int sessionId,
             int clientUid,
             IAudioFlinger::track_flags_t flags,
             bool isOut,
-            alloc_type alloc)
+            alloc_type alloc,
+            track_type type)
     :   RefBase(),
         mThread(thread),
         mClient(client),
@@ -94,7 +95,8 @@
         mIsOut(isOut),
         mServerProxy(NULL),
         mId(android_atomic_inc(&nextTrackId)),
-        mTerminated(false)
+        mTerminated(false),
+        mType(type)
 {
     // if the caller is us, trust the specified uid
     if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) {
@@ -108,16 +110,10 @@
     // battery usage on it.
     mUid = clientUid;
 
-    // client == 0 implies sharedBuffer == 0
-    ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
-
-    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
-            sharedBuffer->size());
-
     // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
     size_t size = sizeof(audio_track_cblk_t);
-    size_t bufferSize = (sharedBuffer == 0 ? roundup(frameCount) : frameCount) * mFrameSize;
-    if (sharedBuffer == 0 && alloc == ALLOC_CBLK) {
+    size_t bufferSize = (buffer == NULL ? roundup(frameCount) : frameCount) * mFrameSize;
+    if (buffer == NULL && alloc == ALLOC_CBLK) {
         size += bufferSize;
     }
 
@@ -166,16 +162,22 @@
             break;
         case ALLOC_CBLK:
             // clear all buffers
-            if (sharedBuffer == 0) {
+            if (buffer == NULL) {
                 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
                 memset(mBuffer, 0, bufferSize);
             } else {
-                mBuffer = sharedBuffer->pointer();
+                mBuffer = buffer;
 #if 0
                 mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
 #endif
             }
             break;
+        case ALLOC_LOCAL:
+            mBuffer = calloc(1, bufferSize);
+            break;
+        case ALLOC_NONE:
+            mBuffer = buffer;
+            break;
         }
 
 #ifdef TEE_SINK
@@ -200,6 +202,17 @@
     }
 }
 
+status_t AudioFlinger::ThreadBase::TrackBase::initCheck() const
+{
+    status_t status;
+    if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
+        status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
+    } else {
+        status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
+    }
+    return status;
+}
+
 AudioFlinger::ThreadBase::TrackBase::~TrackBase()
 {
 #ifdef TEE_SINK
@@ -364,12 +377,17 @@
             audio_format_t format,
             audio_channel_mask_t channelMask,
             size_t frameCount,
+            void *buffer,
             const sp<IMemory>& sharedBuffer,
             int sessionId,
             int uid,
-            IAudioFlinger::track_flags_t flags)
-    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
-            sessionId, uid, flags, true /*isOut*/),
+            IAudioFlinger::track_flags_t flags,
+            track_type type)
+    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
+                  (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
+                  sessionId, uid, flags, true /*isOut*/,
+                  (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
+                  type),
     mFillingUpStatus(FS_INVALID),
     // mRetryCount initialized later when needed
     mSharedBuffer(sharedBuffer),
@@ -389,13 +407,19 @@
     mPreviousFramesWritten(0)
     // mPreviousTimestamp
 {
+    // client == 0 implies sharedBuffer == 0
+    ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
+
+    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
+            sharedBuffer->size());
+
     if (mCblk == NULL) {
         return;
     }
 
     if (sharedBuffer == 0) {
         mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
-                mFrameSize);
+                mFrameSize, !isExternalTrack(), sampleRate);
     } else {
         mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
                 mFrameSize);
@@ -463,7 +487,7 @@
             Mutex::Autolock _l(thread->mLock);
             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
             bool wasActive = playbackThread->destroyTrack_l(this);
-            if (!isOutputTrack() && !wasActive) {
+            if (isExternalTrack() && !wasActive) {
                 AudioSystem::releaseOutput(thread->id());
             }
         }
@@ -1122,7 +1146,8 @@
             int sessionId,
             int uid)
     : Track(thread, client, streamType, sampleRate, format, channelMask,
-            frameCount, sharedBuffer, sessionId, uid, IAudioFlinger::TRACK_TIMED),
+            frameCount, (sharedBuffer != 0) ? sharedBuffer->pointer() : NULL, sharedBuffer,
+                    sessionId, uid, IAudioFlinger::TRACK_TIMED, TYPE_TIMED),
       mQueueHeadInFlight(false),
       mTrimQueueHeadOnRelease(false),
       mFramesPendingInQueue(0),
@@ -1617,7 +1642,7 @@
             size_t frameCount,
             int uid)
     :   Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
-                NULL, 0, uid, IAudioFlinger::TRACK_DEFAULT),
+                NULL, 0, 0, uid, IAudioFlinger::TRACK_DEFAULT, TYPE_OUTPUT),
     mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
 {
 
@@ -1630,12 +1655,11 @@
                 frameCount, mChannelMask);
         // since client and server are in the same process,
         // the buffer has the same virtual address on both sides
-        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize);
+        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
+                true /*clientInServer*/);
         mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
         mClientProxy->setSendLevel(0.0);
         mClientProxy->setSampleRate(sampleRate);
-        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
-                true /*clientInServer*/);
     } else {
         ALOGW("Error creating output track on thread %p", playbackThread);
     }
@@ -1826,6 +1850,75 @@
 }
 
 
+AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
+                                                     uint32_t sampleRate,
+                                                     audio_channel_mask_t channelMask,
+                                                     audio_format_t format,
+                                                     size_t frameCount,
+                                                     void *buffer,
+                                                     IAudioFlinger::track_flags_t flags)
+    :   Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
+              buffer, 0, 0, getuid(), flags, TYPE_PATCH),
+              mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
+{
+    uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
+                                                                    playbackThread->sampleRate();
+    mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
+    mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
+
+    ALOGV("PatchTrack %p sampleRate %d mPeerTimeout %d.%03d sec",
+                                      this, sampleRate,
+                                      (int)mPeerTimeout.tv_sec,
+                                      (int)(mPeerTimeout.tv_nsec / 1000000));
+}
+
+AudioFlinger::PlaybackThread::PatchTrack::~PatchTrack()
+{
+}
+
+// AudioBufferProvider interface
+status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
+        AudioBufferProvider::Buffer* buffer, int64_t pts)
+{
+    ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::getNextBuffer() called without peer proxy");
+    Proxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
+    ALOGV_IF(status != NO_ERROR, "PatchTrack() %p getNextBuffer status %d", this, status);
+    if (buf.mFrameCount == 0) {
+        return WOULD_BLOCK;
+    }
+    buffer->frameCount = buf.mFrameCount;
+    status = Track::getNextBuffer(buffer, pts);
+    return status;
+}
+
+void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+{
+    ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::releaseBuffer() called without peer proxy");
+    Proxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    buf.mRaw = buffer->raw;
+    mPeerProxy->releaseBuffer(&buf);
+    TrackBase::releaseBuffer(buffer);
+}
+
+status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
+                                                                const struct timespec *timeOut)
+{
+    return mProxy->obtainBuffer(buffer, timeOut);
+}
+
+void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
+{
+    mProxy->releaseBuffer(buffer);
+    if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
+        ALOGW("PatchTrack::releaseBuffer() disabled due to previous underrun, restarting");
+        start();
+    }
+    android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
+}
+
 // ----------------------------------------------------------------------------
 //      Record
 // ----------------------------------------------------------------------------
@@ -1873,13 +1966,18 @@
             audio_format_t format,
             audio_channel_mask_t channelMask,
             size_t frameCount,
+            void *buffer,
             int sessionId,
             int uid,
-            IAudioFlinger::track_flags_t flags)
+            IAudioFlinger::track_flags_t flags,
+            track_type type)
     :   TrackBase(thread, client, sampleRate, format,
-                  channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid,
+                  channelMask, frameCount, buffer, sessionId, uid,
                   flags, false /*isOut*/,
-                  flags & IAudioFlinger::TRACK_FAST ? ALLOC_PIPE : ALLOC_CBLK),
+                  (type == TYPE_DEFAULT) ?
+                          ((flags & IAudioFlinger::TRACK_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
+                          ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
+                  type),
         mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0),
         // See real initialization of mRsmpInFront at RecordThread::start()
         mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL)
@@ -1888,7 +1986,8 @@
         return;
     }
 
-    mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, mFrameSize);
+    mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
+                                              mFrameSize, !isExternalTrack());
 
     uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
     // FIXME I don't understand either of the channel count checks
@@ -1950,8 +2049,8 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         RecordThread *recordThread = (RecordThread *)thread.get();
-        if (recordThread->stop(this)) {
-            AudioSystem::stopInput(recordThread->id());
+        if (recordThread->stop(this) && isExternalTrack()) {
+            AudioSystem::stopInput(recordThread->id(), (audio_session_t)mSessionId);
         }
     }
 }
@@ -1963,10 +2062,12 @@
     {
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
-            if (mState == ACTIVE || mState == RESUMING) {
-                AudioSystem::stopInput(thread->id());
+            if (isExternalTrack()) {
+                if (mState == ACTIVE || mState == RESUMING) {
+                    AudioSystem::stopInput(thread->id(), (audio_session_t)mSessionId);
+                }
+                AudioSystem::releaseInput(thread->id(), (audio_session_t)mSessionId);
             }
-            AudioSystem::releaseInput(thread->id());
             Mutex::Autolock _l(thread->mLock);
             RecordThread *recordThread = (RecordThread *) thread.get();
             recordThread->destroyTrack_l(this);
@@ -1987,12 +2088,12 @@
 
 /*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
 {
-    result.append("    Active Client Fmt Chn mask Session S   Server fCount Resampling\n");
+    result.append("    Active Client Fmt Chn mask Session S   Server fCount SRate\n");
 }
 
 void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size, bool active)
 {
-    snprintf(buffer, size, "    %6s %6u %3u %08X %7u %1d %08X %6zu %10d\n",
+    snprintf(buffer, size, "    %6s %6u %3u %08X %7u %1d %08X %6zu %5u\n",
             active ? "yes" : "no",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mFormat,
@@ -2001,7 +2102,7 @@
             mState,
             mCblk->mServer,
             mFrameCount,
-            mResampler != NULL);
+            mSampleRate);
 
 }
 
@@ -2028,4 +2129,70 @@
     mFramesToDrop = 0;
 }
 
+
+AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread,
+                                                     uint32_t sampleRate,
+                                                     audio_channel_mask_t channelMask,
+                                                     audio_format_t format,
+                                                     size_t frameCount,
+                                                     void *buffer,
+                                                     IAudioFlinger::track_flags_t flags)
+    :   RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount,
+                buffer, 0, getuid(), flags, TYPE_PATCH),
+                mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
+{
+    uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
+                                                                recordThread->sampleRate();
+    mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
+    mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
+
+    ALOGV("PatchRecord %p sampleRate %d mPeerTimeout %d.%03d sec",
+                                      this, sampleRate,
+                                      (int)mPeerTimeout.tv_sec,
+                                      (int)(mPeerTimeout.tv_nsec / 1000000));
+}
+
+AudioFlinger::RecordThread::PatchRecord::~PatchRecord()
+{
+}
+
+// AudioBufferProvider interface
+status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
+                                                  AudioBufferProvider::Buffer* buffer, int64_t pts)
+{
+    ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::getNextBuffer() called without peer proxy");
+    Proxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
+    ALOGV_IF(status != NO_ERROR,
+             "PatchRecord() %p mPeerProxy->obtainBuffer status %d", this, status);
+    if (buf.mFrameCount == 0) {
+        return WOULD_BLOCK;
+    }
+    buffer->frameCount = buf.mFrameCount;
+    status = RecordTrack::getNextBuffer(buffer, pts);
+    return status;
+}
+
+void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+{
+    ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::releaseBuffer() called without peer proxy");
+    Proxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    buf.mRaw = buffer->raw;
+    mPeerProxy->releaseBuffer(&buf);
+    TrackBase::releaseBuffer(buffer);
+}
+
+status_t AudioFlinger::RecordThread::PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
+                                                               const struct timespec *timeOut)
+{
+    return mProxy->obtainBuffer(buffer, timeOut);
+}
+
+void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
+{
+    mProxy->releaseBuffer(buffer);
+}
+
 }; // namespace android
diff --git a/services/audioflinger/tests/mixer_to_wav_tests.sh b/services/audioflinger/tests/mixer_to_wav_tests.sh
index 93bff47..9b39e77 100755
--- a/services/audioflinger/tests/mixer_to_wav_tests.sh
+++ b/services/audioflinger/tests/mixer_to_wav_tests.sh
@@ -72,9 +72,9 @@
 # track__Resample / track__genericResample
 # track__NoResample / track__16BitsStereo / track__16BitsMono
 # Aux buffer
-    adb shell test-mixer $1 -s 9307 \
+    adb shell test-mixer $1 -c 5 -s 9307 \
         -a /sdcard/aux9307gra.wav -o /sdcard/tm9307gra.wav \
-        sine:2,1000,3000 sine:1,2000,9307 chirp:2,9307
+        sine:4,1000,3000 sine:1,2000,9307 chirp:3,9307
     adb pull /sdcard/tm9307gra.wav $2
     adb pull /sdcard/aux9307gra.wav $2
 
diff --git a/services/audioflinger/tests/resampler_tests.cpp b/services/audioflinger/tests/resampler_tests.cpp
index 8624b62..d6217ba 100644
--- a/services/audioflinger/tests/resampler_tests.cpp
+++ b/services/audioflinger/tests/resampler_tests.cpp
@@ -29,6 +29,7 @@
 #include <math.h>
 #include <vector>
 #include <utility>
+#include <iostream>
 #include <cutils/log.h>
 #include <gtest/gtest.h>
 #include <media/AudioBufferProvider.h>
@@ -59,7 +60,7 @@
         int check = memcmp((const char*)reference + i * outputFrameSize,
                 (const char*)test + i * outputFrameSize, outputFrameSize);
         if (check) {
-            ALOGE("Failure at frame %d", i);
+            ALOGE("Failure at frame %zu", i);
             ASSERT_EQ(check, 0); /* fails */
         }
     }
@@ -153,6 +154,9 @@
     return accum / count;
 }
 
+// TI = resampler input type, int16_t or float
+// TO = resampler output type, int32_t or float
+template <typename TI, typename TO>
 void testStopbandDownconversion(size_t channels,
         unsigned inputFreq, unsigned outputFreq,
         unsigned passband, unsigned stopband,
@@ -161,20 +165,21 @@
     // create the provider
     std::vector<int> inputIncr;
     SignalProvider provider;
-    provider.setChirp<int16_t>(channels,
+    provider.setChirp<TI>(channels,
             0., inputFreq/2., inputFreq, inputFreq/2000.);
     provider.setIncr(inputIncr);
 
     // calculate the output size
     size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
-    size_t outputFrameSize = channels * sizeof(int32_t);
+    size_t outputFrameSize = channels * sizeof(TO);
     size_t outputSize = outputFrameSize * outputFrames;
     outputSize &= ~7;
 
     // create the resampler
     android::AudioResampler* resampler;
 
-    resampler = android::AudioResampler::create(AUDIO_FORMAT_PCM_16_BIT,
+    resampler = android::AudioResampler::create(
+            is_same<TI, int16_t>::value ? AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_FLOAT,
             channels, outputFreq, quality);
     resampler->setSampleRate(inputFreq);
     resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT,
@@ -186,7 +191,7 @@
     void* reference = malloc(outputSize);
     resample(channels, reference, outputFrames, refIncr, &provider, resampler);
 
-    int32_t *out = reinterpret_cast<int32_t *>(reference);
+    TO *out = reinterpret_cast<TO *>(reference);
 
     // check signal energy in passband
     const unsigned passbandFrame = passband * outputFreq / 1000.;
@@ -206,10 +211,10 @@
                 provider.getNumFrames(), outputFrames,
                 passbandFrame, stopbandFrame, stopbandEnergy, passbandEnergy, dbAtten);
         for (size_t i = 0; i < 10; ++i) {
-            printf("%d\n", out[i+passbandFrame*channels]);
+            std::cout << out[i+passbandFrame*channels] << std::endl;
         }
         for (size_t i = 0; i < 10; ++i) {
-            printf("%d\n", out[i+stopbandFrame*channels]);
+            std::cout << out[i+stopbandFrame*channels] << std::endl;
         }
 #endif
     }
@@ -292,7 +297,7 @@
  * are properly suppressed.  It uses downsampling because the stopband can be
  * clearly isolated by input frequencies exceeding the output sample rate (nyquist).
  */
-TEST(audioflinger_resampler, stopbandresponse) {
+TEST(audioflinger_resampler, stopbandresponse_integer) {
     // not all of these may work (old resamplers fail on downsampling)
     static const enum android::AudioResampler::src_quality kQualityArray[] = {
             //android::AudioResampler::LOW_QUALITY,
@@ -307,13 +312,100 @@
     // in this test we assume a maximum transition band between 12kHz and 20kHz.
     // there must be at least 60dB relative attenuation between stopband and passband.
     for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
-        testStopbandDownconversion(2, 48000, 32000, 12000, 20000, kQualityArray[i]);
+        testStopbandDownconversion<int16_t, int32_t>(
+                2, 48000, 32000, 12000, 20000, kQualityArray[i]);
     }
 
     // in this test we assume a maximum transition band between 7kHz and 15kHz.
     // there must be at least 60dB relative attenuation between stopband and passband.
     // (the weird ratio triggers interpolative resampling)
     for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
-        testStopbandDownconversion(2, 48000, 22101, 7000, 15000, kQualityArray[i]);
+        testStopbandDownconversion<int16_t, int32_t>(
+                2, 48000, 22101, 7000, 15000, kQualityArray[i]);
     }
 }
+
+TEST(audioflinger_resampler, stopbandresponse_integer_multichannel) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                8, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                8, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_float) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                2, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                2, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_float_multichannel) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                8, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                8, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp
index 3940702..9a4fad6 100644
--- a/services/audioflinger/tests/test-mixer.cpp
+++ b/services/audioflinger/tests/test-mixer.cpp
@@ -36,11 +36,12 @@
 using namespace android;
 
 static void usage(const char* name) {
-    fprintf(stderr, "Usage: %s [-f] [-m]"
+    fprintf(stderr, "Usage: %s [-f] [-m] [-c channels]"
                     " [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]"
                     " (<input-file> | <command>)+\n", name);
     fprintf(stderr, "    -f    enable floating point input track\n");
     fprintf(stderr, "    -m    enable floating point mixer output\n");
+    fprintf(stderr, "    -c    number of mixer output channels\n");
     fprintf(stderr, "    -s    mixer sample-rate\n");
     fprintf(stderr, "    -o    <output-file> WAV file, pcm16 (or float if -m specified)\n");
     fprintf(stderr, "    -a    <aux-buffer-file>\n");
@@ -61,7 +62,7 @@
     info.samplerate = sampleRate;
     info.channels = channels;
     info.format = SF_FORMAT_WAV | (isBufferFloat ? SF_FORMAT_FLOAT : SF_FORMAT_PCM_16);
-    printf("saving file:%s  channels:%d  samplerate:%d  frames:%d\n",
+    printf("saving file:%s  channels:%u  samplerate:%u  frames:%zu\n",
             filename, info.channels, info.samplerate, frames);
     SNDFILE *sf = sf_open(filename, SFM_WRITE, &info);
     if (sf == NULL) {
@@ -90,7 +91,7 @@
     std::vector<int32_t> Names;
     std::vector<SignalProvider> Providers;
 
-    for (int ch; (ch = getopt(argc, argv, "fms:o:a:P:")) != -1;) {
+    for (int ch; (ch = getopt(argc, argv, "fmc:s:o:a:P:")) != -1;) {
         switch (ch) {
         case 'f':
             useInputFloat = true;
@@ -98,6 +99,9 @@
         case 'm':
             useMixerFloat = true;
             break;
+        case 'c':
+            outputChannels = atoi(optarg);
+            break;
         case 's':
             outputSampleRate = atoi(optarg);
             break;
@@ -160,7 +164,7 @@
 
             parseCSV(argv[i] + strlen(sine), v);
             if (v.size() == 3) {
-                printf("creating sine(%d %d)\n", v[0], v[1]);
+                printf("creating sine(%d %d %d)\n", v[0], v[1], v[2]);
                 if (useInputFloat) {
                     Providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
                 } else {
@@ -191,6 +195,8 @@
     const size_t outputFrameSize = outputChannels
             * (useMixerFloat ? sizeof(float) : sizeof(int16_t));
     const size_t outputSize = outputFrames * outputFrameSize;
+    const audio_channel_mask_t outputChannelMask =
+            audio_channel_out_mask_from_count(outputChannels);
     void *outputAddr = NULL;
     (void) posix_memalign(&outputAddr, 32, outputSize);
     memset(outputAddr, 0, outputSize);
@@ -224,15 +230,29 @@
         Names.push_back(name);
         mixer->setBufferProvider(name, &Providers[i]);
         mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
-                (void *) outputAddr);
+                (void *)outputAddr);
         mixer->setParameter(
                 name,
                 AudioMixer::TRACK,
-                AudioMixer::MIXER_FORMAT, (void *)mixerFormat);
-        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
+                AudioMixer::MIXER_FORMAT,
+                (void *)(uintptr_t)mixerFormat);
+        mixer->setParameter(
+                name,
+                AudioMixer::TRACK,
+                AudioMixer::FORMAT,
                 (void *)(uintptr_t)inputFormat);
         mixer->setParameter(
                 name,
+                AudioMixer::TRACK,
+                AudioMixer::MIXER_CHANNEL_MASK,
+                (void *)(uintptr_t)outputChannelMask);
+        mixer->setParameter(
+                name,
+                AudioMixer::TRACK,
+                AudioMixer::CHANNEL_MASK,
+                (void *)(uintptr_t)channelMask);
+        mixer->setParameter(
+                name,
                 AudioMixer::RESAMPLE,
                 AudioMixer::SAMPLE_RATE,
                 (void *)(uintptr_t)Providers[i].getSampleRate());
diff --git a/services/audioflinger/tests/test_utils.h b/services/audioflinger/tests/test_utils.h
index f954292..3d51cdc 100644
--- a/services/audioflinger/tests/test_utils.h
+++ b/services/audioflinger/tests/test_utils.h
@@ -120,7 +120,7 @@
         }
         if (!mInputIncr.empty()) {
             size_t provided = mInputIncr[mNextIdx++];
-            ALOGV("getNextBuffer() mValue[%d]=%u not %u",
+            ALOGV("getNextBuffer() mValue[%zu]=%zu not %zu",
                     mNextIdx-1, provided, buffer->frameCount);
             if (provided < buffer->frameCount) {
                 buffer->frameCount = provided;
@@ -129,8 +129,8 @@
                 mNextIdx = 0;
             }
         }
-        ALOGV("getNextBuffer() requested %u frames out of %u frames available"
-                " and returned %u frames\n",
+        ALOGV("getNextBuffer() requested %zu frames out of %zu frames available"
+                " and returned %zu frames",
                 requestedFrames, mNumFrames - mNextFrame, buffer->frameCount);
         mUnrel = buffer->frameCount;
         if (buffer->frameCount > 0) {
@@ -145,14 +145,14 @@
     virtual void releaseBuffer(Buffer* buffer)
     {
         if (buffer->frameCount > mUnrel) {
-            ALOGE("releaseBuffer() released %u frames but only %u available "
-                    "to release\n", buffer->frameCount, mUnrel);
+            ALOGE("releaseBuffer() released %zu frames but only %zu available "
+                    "to release", buffer->frameCount, mUnrel);
             mNextFrame += mUnrel;
             mUnrel = 0;
         } else {
 
-            ALOGV("releaseBuffer() released %u frames out of %u frames available "
-                    "to release\n", buffer->frameCount, mUnrel);
+            ALOGV("releaseBuffer() released %zu frames out of %zu frames available "
+                    "to release", buffer->frameCount, mUnrel);
             mNextFrame += buffer->frameCount;
             mUnrel -= buffer->frameCount;
         }
@@ -195,7 +195,7 @@
         T yt = convertValue<T>(y);
 
         for (size_t j = 0; j < channels; ++j) {
-            buffer[i*channels + j] = yt / (j + 1);
+            buffer[i*channels + j] = yt / T(j + 1);
         }
     }
 }
@@ -221,7 +221,7 @@
         T yt = convertValue<T>(y);
 
         for (size_t j = 0; j < channels; ++j) {
-            buffer[i*channels + j] = yt / (j + 1);
+            buffer[i*channels + j] = yt / T(j + 1);
         }
     }
 }
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/AudioPolicyClientImpl.cpp b/services/audiopolicy/AudioPolicyClientImpl.cpp
index c322d92..c0019d1 100644
--- a/services/audiopolicy/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/AudioPolicyClientImpl.cpp
@@ -35,22 +35,20 @@
     return af->loadHwModule(name);
 }
 
-audio_io_handle_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
-                               audio_devices_t *pDevices,
-                               uint32_t *pSamplingRate,
-                               audio_format_t *pFormat,
-                               audio_channel_mask_t *pChannelMask,
-                               uint32_t *pLatencyMs,
-                               audio_output_flags_t flags,
-                               const audio_offload_info_t *offloadInfo)
+status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
+                                                           audio_io_handle_t *output,
+                                                           audio_config_t *config,
+                                                           audio_devices_t *devices,
+                                                           const String8& address,
+                                                           uint32_t *latencyMs,
+                                                           audio_output_flags_t flags)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
         ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
+        return PERMISSION_DENIED;
     }
-    return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
-                          pLatencyMs, flags, offloadInfo);
+    return af->openOutput(module, output, config, devices, address, latencyMs, flags);
 }
 
 audio_io_handle_t AudioPolicyService::AudioPolicyClient::openDuplicateOutput(
@@ -97,19 +95,21 @@
     return af->restoreOutput(output);
 }
 
-audio_io_handle_t AudioPolicyService::AudioPolicyClient::openInput(audio_module_handle_t module,
-                              audio_devices_t *pDevices,
-                              uint32_t *pSamplingRate,
-                              audio_format_t *pFormat,
-                              audio_channel_mask_t *pChannelMask)
+status_t AudioPolicyService::AudioPolicyClient::openInput(audio_module_handle_t module,
+                                                          audio_io_handle_t *input,
+                                                          audio_config_t *config,
+                                                          audio_devices_t *device,
+                                                          const String8& address,
+                                                          audio_source_t source,
+                                                          audio_input_flags_t flags)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
         ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
+        return PERMISSION_DENIED;
     }
 
-    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
+    return af->openInput(module, input, config, device, address, source, flags);
 }
 
 status_t AudioPolicyService::AudioPolicyClient::closeInput(audio_io_handle_t input)
@@ -212,4 +212,9 @@
     mAudioPolicyService->onAudioPatchListUpdate();
 }
 
+audio_unique_id_t AudioPolicyService::AudioPolicyClient::newAudioUniqueId()
+{
+    return AudioSystem::newAudioUniqueId();
+}
+
 }; // namespace android
diff --git a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
index 53f3e2d..9639096 100644
--- a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
@@ -62,6 +62,46 @@
     return af->loadHwModule(name);
 }
 
+static audio_io_handle_t open_output(audio_module_handle_t module,
+                                    audio_devices_t *pDevices,
+                                    uint32_t *pSamplingRate,
+                                    audio_format_t *pFormat,
+                                    audio_channel_mask_t *pChannelMask,
+                                    uint32_t *pLatencyMs,
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return AUDIO_IO_HANDLE_NONE;
+    }
+
+    if (pSamplingRate == NULL || pFormat == NULL || pChannelMask == NULL ||
+            pDevices == NULL || pLatencyMs == NULL) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
+    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+    config.sample_rate = *pSamplingRate;
+    config.format = *pFormat;
+    config.channel_mask = *pChannelMask;
+    if (offloadInfo != NULL) {
+        config.offload_info = *offloadInfo;
+    }
+    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+    status_t status = af->openOutput(module, &output, &config, pDevices,
+                                     String8(""), pLatencyMs, flags);
+    if (status == NO_ERROR) {
+        *pSamplingRate = config.sample_rate;
+        *pFormat = config.format;
+        *pChannelMask = config.channel_mask;
+        if (offloadInfo != NULL) {
+            *offloadInfo = config.offload_info;
+        }
+    }
+    return output;
+}
+
 // deprecated: replaced by aps_open_output_on_module()
 audio_io_handle_t aps_open_output(void *service __unused,
                                          audio_devices_t *pDevices,
@@ -71,14 +111,8 @@
                                          uint32_t *pLatencyMs,
                                          audio_output_flags_t flags)
 {
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-
-    return af->openOutput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
-                          pLatencyMs, flags);
+    return open_output((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
+                          pLatencyMs, flags, NULL);
 }
 
 audio_io_handle_t aps_open_output_on_module(void *service __unused,
@@ -91,12 +125,7 @@
                                                    audio_output_flags_t flags,
                                                    const audio_offload_info_t *offloadInfo)
 {
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-    return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
+    return open_output(module, pDevices, pSamplingRate, pFormat, pChannelMask,
                           pLatencyMs, flags, offloadInfo);
 }
 
@@ -144,6 +173,37 @@
     return af->restoreOutput(output);
 }
 
+static audio_io_handle_t open_input(audio_module_handle_t module,
+                                    audio_devices_t *pDevices,
+                                    uint32_t *pSamplingRate,
+                                    audio_format_t *pFormat,
+                                    audio_channel_mask_t *pChannelMask)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return AUDIO_IO_HANDLE_NONE;
+    }
+
+    if (pSamplingRate == NULL || pFormat == NULL || pChannelMask == NULL || pDevices == NULL) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
+    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+    config.sample_rate = *pSamplingRate;
+    config.format = *pFormat;
+    config.channel_mask = *pChannelMask;
+    audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+    status_t status = af->openInput(module, &input, &config, pDevices,
+                                    String8(""), AUDIO_SOURCE_MIC, AUDIO_INPUT_FLAG_FAST /*FIXME*/);
+    if (status == NO_ERROR) {
+        *pSamplingRate = config.sample_rate;
+        *pFormat = config.format;
+        *pChannelMask = config.channel_mask;
+    }
+    return input;
+}
+
+
 // deprecated: replaced by aps_open_input_on_module(), and acoustics parameter is ignored
 audio_io_handle_t aps_open_input(void *service __unused,
                                         audio_devices_t *pDevices,
@@ -152,13 +212,7 @@
                                         audio_channel_mask_t *pChannelMask,
                                         audio_in_acoustics_t acoustics __unused)
 {
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-
-    return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
+    return  open_input((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
 }
 
 audio_io_handle_t aps_open_input_on_module(void *service __unused,
@@ -168,13 +222,7 @@
                                                   audio_format_t *pFormat,
                                                   audio_channel_mask_t *pChannelMask)
 {
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-
-    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
+    return  open_input(module, pDevices, pSamplingRate, pFormat, pChannelMask);
 }
 
 int aps_close_input(void *service __unused, audio_io_handle_t input)
diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/AudioPolicyEffects.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/AudioPolicyEffects.h b/services/audiopolicy/AudioPolicyEffects.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 33e4397..50ee803 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -112,13 +112,17 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    audio_in_acoustics_t acoustics) = 0;
+                                    audio_session_t session,
+                                    audio_input_flags_t flags) = 0;
     // indicates to the audio policy manager that the input starts being used.
-    virtual status_t startInput(audio_io_handle_t input) = 0;
+    virtual status_t startInput(audio_io_handle_t input,
+                                audio_session_t session) = 0;
     // indicates to the audio policy manager that the input stops being used.
-    virtual status_t stopInput(audio_io_handle_t input) = 0;
+    virtual status_t stopInput(audio_io_handle_t input,
+                               audio_session_t session) = 0;
     // releases the input.
-    virtual void releaseInput(audio_io_handle_t input) = 0;
+    virtual void releaseInput(audio_io_handle_t input,
+                              audio_session_t session) = 0;
 
     //
     // volume control functions
@@ -210,14 +214,13 @@
     // in case the audio policy manager has no specific requirements for the output being opened.
     // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream.
     // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
-    virtual audio_io_handle_t openOutput(audio_module_handle_t module,
-                                         audio_devices_t *pDevices,
-                                         uint32_t *pSamplingRate,
-                                         audio_format_t *pFormat,
-                                         audio_channel_mask_t *pChannelMask,
-                                         uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags,
-                                         const audio_offload_info_t *offloadInfo = NULL) = 0;
+    virtual status_t openOutput(audio_module_handle_t module,
+                                audio_io_handle_t *output,
+                                audio_config_t *config,
+                                audio_devices_t *devices,
+                                const String8& address,
+                                uint32_t *latencyMs,
+                                audio_output_flags_t flags) = 0;
     // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
     // a special mixer thread in the AudioFlinger.
     virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2) = 0;
@@ -234,11 +237,13 @@
     //
 
     // opens an audio input
-    virtual audio_io_handle_t openInput(audio_module_handle_t module,
-                                        audio_devices_t *pDevices,
-                                        uint32_t *pSamplingRate,
-                                        audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask) = 0;
+    virtual status_t openInput(audio_module_handle_t module,
+                               audio_io_handle_t *input,
+                               audio_config_t *config,
+                               audio_devices_t *device,
+                               const String8& address,
+                               audio_source_t source,
+                               audio_input_flags_t flags) = 0;
     // closes an audio input
     virtual status_t closeInput(audio_io_handle_t input) = 0;
     //
@@ -285,6 +290,8 @@
     virtual void onAudioPortListUpdate() = 0;
 
     virtual void onAudioPatchListUpdate() = 0;
+
+    virtual audio_unique_id_t newAudioUniqueId() = 0;
 };
 
 extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface);
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
old mode 100755
new mode 100644
index 5a13ac2..75745b3
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -214,7 +214,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int audioSession)
+                                    int audioSession,
+                                    audio_input_flags_t flags)
 {
     if (mAudioPolicyManager == NULL) {
         return 0;
@@ -231,7 +232,8 @@
     Mutex::Autolock _l(mLock);
     // the audio_in_acoustics_t parameter is ignored by get_input()
     audio_io_handle_t input = mAudioPolicyManager->getInput(inputSource, samplingRate,
-                                                   format, channelMask, (audio_in_acoustics_t) 0);
+                                                   format, channelMask,
+                                                   (audio_session_t)audioSession, flags);
 
     if (input == 0) {
         return input;
@@ -246,33 +248,36 @@
     return input;
 }
 
-status_t AudioPolicyService::startInput(audio_io_handle_t input)
+status_t AudioPolicyService::startInput(audio_io_handle_t input,
+                                        audio_session_t session)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     Mutex::Autolock _l(mLock);
 
-    return mAudioPolicyManager->startInput(input);
+    return mAudioPolicyManager->startInput(input, session);
 }
 
-status_t AudioPolicyService::stopInput(audio_io_handle_t input)
+status_t AudioPolicyService::stopInput(audio_io_handle_t input,
+                                       audio_session_t session)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     Mutex::Autolock _l(mLock);
 
-    return mAudioPolicyManager->stopInput(input);
+    return mAudioPolicyManager->stopInput(input, session);
 }
 
-void AudioPolicyService::releaseInput(audio_io_handle_t input)
+void AudioPolicyService::releaseInput(audio_io_handle_t input,
+                                      audio_session_t session)
 {
     if (mAudioPolicyManager == NULL) {
         return;
     }
     Mutex::Autolock _l(mLock);
-    mAudioPolicyManager->releaseInput(input);
+    mAudioPolicyManager->releaseInput(input, session);
 
     // release audio processors from the input
     status_t status = mAudioPolicyEffects->releaseInputEffects(input);
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
old mode 100755
new mode 100644
index 0a246f2..aa46ace
--- a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
@@ -202,7 +202,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int audioSession)
+                                    int audioSession,
+                                    audio_input_flags_t flags __unused)
 {
     if (mpAudioPolicy == NULL) {
         return 0;
@@ -234,7 +235,8 @@
     return input;
 }
 
-status_t AudioPolicyService::startInput(audio_io_handle_t input)
+status_t AudioPolicyService::startInput(audio_io_handle_t input,
+                                        audio_session_t session __unused)
 {
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
@@ -244,7 +246,8 @@
     return mpAudioPolicy->start_input(mpAudioPolicy, input);
 }
 
-status_t AudioPolicyService::stopInput(audio_io_handle_t input)
+status_t AudioPolicyService::stopInput(audio_io_handle_t input,
+                                       audio_session_t session __unused)
 {
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
@@ -254,7 +257,8 @@
     return mpAudioPolicy->stop_input(mpAudioPolicy, input);
 }
 
-void AudioPolicyService::releaseInput(audio_io_handle_t input)
+void AudioPolicyService::releaseInput(audio_io_handle_t input,
+                                      audio_session_t session __unused)
 {
     if (mpAudioPolicy == NULL) {
         return;
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index f2320de..65d52d0 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -30,6 +30,10 @@
 // A device mask for all audio output devices that are considered "remote" when evaluating
 // active output devices in isStreamActiveRemotely()
 #define APM_AUDIO_OUT_DEVICE_REMOTE_ALL  AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+// A device mask for all audio input and output devices where matching inputs/outputs on device
+// type alone is not enough: the address must match too
+#define APM_AUDIO_DEVICE_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX | \
+                                            AUDIO_DEVICE_OUT_REMOTE_SUBMIX)
 
 #include <inttypes.h>
 #include <math.h>
@@ -82,6 +86,7 @@
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_HDMI_ARC),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPDIF),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_FM),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
@@ -143,6 +148,7 @@
 const StringToEnum sOutChannelsNameToEnumTable[] = {
     STRING_TO_ENUM(AUDIO_CHANNEL_OUT_MONO),
     STRING_TO_ENUM(AUDIO_CHANNEL_OUT_STEREO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_QUAD),
     STRING_TO_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
     STRING_TO_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
 };
@@ -228,14 +234,6 @@
             }
             ALOGV("setDeviceConnectionState() connecting device %x", device);
 
-            if (checkOutputsForDevice(device, state, outputs, address) != NO_ERROR) {
-                return INVALID_OPERATION;
-            }
-            // outputs should never be empty here
-            ALOG_ASSERT(outputs.size() != 0, "setDeviceConnectionState():"
-                    "checkOutputsForDevice() returned no outputs but status OK");
-            ALOGV("setDeviceConnectionState() checkOutputsForDevice() returned %zu outputs",
-                  outputs.size());
             // register new device as available
             index = mAvailableOutputDevices.add(devDesc);
             if (index >= 0) {
@@ -248,6 +246,15 @@
                 return NO_MEMORY;
             }
 
+            if (checkOutputsForDevice(device, state, outputs, address) != NO_ERROR) {
+                mAvailableOutputDevices.remove(devDesc);
+                return INVALID_OPERATION;
+            }
+            // outputs should never be empty here
+            ALOG_ASSERT(outputs.size() != 0, "setDeviceConnectionState():"
+                    "checkOutputsForDevice() returned no outputs but status OK");
+            ALOGV("setDeviceConnectionState() checkOutputsForDevice() returned %zu outputs",
+                  outputs.size());
             break;
         // handle output device disconnection
         case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
@@ -261,8 +268,6 @@
             mAvailableOutputDevices.remove(devDesc);
 
             checkOutputsForDevice(device, state, outputs, address);
-            // not currently handling multiple simultaneous submixes: ignoring remote submix
-            //   case and address
             } break;
 
         default:
@@ -295,10 +300,13 @@
             // do not force device change on duplicated output because if device is 0, it will
             // also force a device 0 for the two outputs it is duplicated to which may override
             // a valid device selection on those outputs.
+            bool force = !mOutputs.valueAt(i)->isDuplicated()
+                    && (!deviceDistinguishesOnAddress(device)
+                            // always force when disconnecting (a non-duplicated device)
+                            || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
             setOutputDevice(mOutputs.keyAt(i),
                             getNewOutputDevice(mOutputs.keyAt(i), true /*fromCache*/),
-                            !mOutputs.valueAt(i)->isDuplicated(),
-                            0);
+                            force, 0);
         }
 
         mpClientInterface->onAudioPortListUpdate();
@@ -617,20 +625,10 @@
         }
         for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
             sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
-            bool found = false;
-            if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
-                if (profile->isCompatibleProfile(device, samplingRate, format,
-                                           channelMask,
-                                           AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)) {
-                    found = true;
-                }
-            } else {
-                if (profile->isCompatibleProfile(device, samplingRate, format,
-                                           channelMask,
-                                           AUDIO_OUTPUT_FLAG_DIRECT)) {
-                    found = true;
-                }
-            }
+            bool found = profile->isCompatibleProfile(device, samplingRate,
+                    NULL /*updatedSamplingRate*/, format, channelMask,
+                    flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD ?
+                        AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD : AUDIO_OUTPUT_FLAG_DIRECT);
             if (found && (mAvailableOutputDevices.types() & profile->mSupportedDevices.types())) {
                 return profile;
             }
@@ -690,8 +688,9 @@
         audio_output_flags_t flags,
         const audio_offload_info_t *offloadInfo)
 {
-    audio_io_handle_t output = 0;
+    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
     uint32_t latency = 0;
+    status_t status;
 
 #ifdef AUDIO_POLICY_TEST
     if (mCurOutput != 0) {
@@ -702,21 +701,26 @@
             ALOGV("getOutput() opening test output");
             sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(NULL);
             outputDesc->mDevice = mTestDevice;
-            outputDesc->mSamplingRate = mTestSamplingRate;
-            outputDesc->mFormat = mTestFormat;
-            outputDesc->mChannelMask = mTestChannels;
             outputDesc->mLatency = mTestLatencyMs;
             outputDesc->mFlags =
                     (audio_output_flags_t)(mDirectOutput ? AUDIO_OUTPUT_FLAG_DIRECT : 0);
             outputDesc->mRefCount[stream] = 0;
-            mTestOutputs[mCurOutput] = mpClientInterface->openOutput(0, &outputDesc->mDevice,
-                                            &outputDesc->mSamplingRate,
-                                            &outputDesc->mFormat,
-                                            &outputDesc->mChannelMask,
-                                            &outputDesc->mLatency,
-                                            outputDesc->mFlags,
-                                            offloadInfo);
-            if (mTestOutputs[mCurOutput]) {
+            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+            config.sample_rate = mTestSamplingRate;
+            config.channel_mask = mTestChannels;
+            config.format = mTestFormat;
+            config.offload_info = *offloadInfo;
+            status = mpClientInterface->openOutput(0,
+                                                  &mTestOutputs[mCurOutput],
+                                                  &config,
+                                                  &outputDesc->mDevice,
+                                                  String8(""),
+                                                  &outputDesc->mLatency,
+                                                  outputDesc->mFlags);
+            if (status == NO_ERROR) {
+                outputDesc->mSamplingRate = config.sample_rate;
+                outputDesc->mFormat = config.format;
+                outputDesc->mChannelMask = config.channel_mask;
                 AudioParameter outputCmd = AudioParameter();
                 outputCmd.addInt(String8("set_id"),mCurOutput);
                 mpClientInterface->setParameters(mTestOutputs[mCurOutput],outputCmd.toString());
@@ -774,37 +778,42 @@
         }
         outputDesc = new AudioOutputDescriptor(profile);
         outputDesc->mDevice = device;
-        outputDesc->mSamplingRate = samplingRate;
-        outputDesc->mFormat = format;
-        outputDesc->mChannelMask = channelMask;
         outputDesc->mLatency = 0;
         outputDesc->mFlags =(audio_output_flags_t) (outputDesc->mFlags | flags);
-        outputDesc->mRefCount[stream] = 0;
-        outputDesc->mStopTime[stream] = 0;
-        outputDesc->mDirectOpenCount = 1;
-        output = mpClientInterface->openOutput(profile->mModule->mHandle,
-                                        &outputDesc->mDevice,
-                                        &outputDesc->mSamplingRate,
-                                        &outputDesc->mFormat,
-                                        &outputDesc->mChannelMask,
-                                        &outputDesc->mLatency,
-                                        outputDesc->mFlags,
-                                        offloadInfo);
+        audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+        config.sample_rate = samplingRate;
+        config.channel_mask = channelMask;
+        config.format = format;
+        config.offload_info = *offloadInfo;
+        status = mpClientInterface->openOutput(profile->mModule->mHandle,
+                                               &output,
+                                               &config,
+                                               &outputDesc->mDevice,
+                                               String8(""),
+                                               &outputDesc->mLatency,
+                                               outputDesc->mFlags);
 
         // only accept an output with the requested parameters
-        if (output == 0 ||
-            (samplingRate != 0 && samplingRate != outputDesc->mSamplingRate) ||
-            (format != AUDIO_FORMAT_DEFAULT && format != outputDesc->mFormat) ||
-            (channelMask != 0 && channelMask != outputDesc->mChannelMask)) {
+        if (status != NO_ERROR ||
+            (samplingRate != 0 && samplingRate != config.sample_rate) ||
+            (format != AUDIO_FORMAT_DEFAULT && format != config.format) ||
+            (channelMask != 0 && channelMask != config.channel_mask)) {
             ALOGV("getOutput() failed opening direct output: output %d samplingRate %d %d,"
                     "format %d %d, channelMask %04x %04x", output, samplingRate,
                     outputDesc->mSamplingRate, format, outputDesc->mFormat, channelMask,
                     outputDesc->mChannelMask);
-            if (output != 0) {
+            if (output != AUDIO_IO_HANDLE_NONE) {
                 mpClientInterface->closeOutput(output);
             }
-            return 0;
+            return AUDIO_IO_HANDLE_NONE;
         }
+        outputDesc->mSamplingRate = config.sample_rate;
+        outputDesc->mChannelMask = config.channel_mask;
+        outputDesc->mFormat = config.format;
+        outputDesc->mRefCount[stream] = 0;
+        outputDesc->mStopTime[stream] = 0;
+        outputDesc->mDirectOpenCount = 1;
+
         audio_io_handle_t srcOutput = getOutputForEffect();
         addOutput(output, outputDesc);
         audio_io_handle_t dstOutput = getOutputForEffect();
@@ -1055,21 +1064,22 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    audio_in_acoustics_t acoustics)
+                                    audio_session_t session,
+                                    audio_input_flags_t flags)
 {
-    audio_io_handle_t input = 0;
-    audio_devices_t device = getDeviceForInputSource(inputSource);
+    ALOGV("getInput() inputSource %d, samplingRate %d, format %d, channelMask %x, session %d, "
+          "flags %#x",
+          inputSource, samplingRate, format, channelMask, session, flags);
 
-    ALOGV("getInput() inputSource %d, samplingRate %d, format %d, channelMask %x, acoustics %x",
-          inputSource, samplingRate, format, channelMask, acoustics);
+    audio_devices_t device = getDeviceForInputSource(inputSource);
 
     if (device == AUDIO_DEVICE_NONE) {
         ALOGW("getInput() could not find device for inputSource %d", inputSource);
-        return 0;
+        return AUDIO_IO_HANDLE_NONE;
     }
 
     // adapt channel selection to input source
-    switch(inputSource) {
+    switch (inputSource) {
     case AUDIO_SOURCE_VOICE_UPLINK:
         channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK;
         break;
@@ -1086,51 +1096,63 @@
     sp<IOProfile> profile = getInputProfile(device,
                                          samplingRate,
                                          format,
-                                         channelMask);
+                                         channelMask,
+                                         flags);
     if (profile == 0) {
-        ALOGW("getInput() could not find profile for device %04x, samplingRate %d, format %d, "
-                "channelMask %04x",
-                device, samplingRate, format, channelMask);
-        return 0;
+        ALOGW("getInput() could not find profile for device 0x%X, samplingRate %u, format %#x, "
+                "channelMask 0x%X, flags %#x",
+                device, samplingRate, format, channelMask, flags);
+        return AUDIO_IO_HANDLE_NONE;
     }
 
     if (profile->mModule->mHandle == 0) {
         ALOGE("getInput(): HW module %s not opened", profile->mModule->mName);
-        return 0;
+        return AUDIO_IO_HANDLE_NONE;
+    }
+
+    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+    config.sample_rate = samplingRate;
+    config.channel_mask = channelMask;
+    config.format = format;
+    audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+    status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
+                                                   &input,
+                                                   &config,
+                                                   &device,
+                                                   String8(""),
+                                                   inputSource,
+                                                   flags);
+
+    // only accept input with the exact requested set of parameters
+    if (status != NO_ERROR ||
+        (samplingRate != config.sample_rate) ||
+        (format != config.format) ||
+        (channelMask != config.channel_mask)) {
+        ALOGW("getInput() failed opening input: samplingRate %d, format %d, channelMask %x",
+                samplingRate, format, channelMask);
+        if (input != AUDIO_IO_HANDLE_NONE) {
+            mpClientInterface->closeInput(input);
+        }
+        return AUDIO_IO_HANDLE_NONE;
     }
 
     sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
-
     inputDesc->mInputSource = inputSource;
-    inputDesc->mDevice = device;
+    inputDesc->mRefCount = 0;
+    inputDesc->mOpenRefCount = 1;
     inputDesc->mSamplingRate = samplingRate;
     inputDesc->mFormat = format;
     inputDesc->mChannelMask = channelMask;
-    inputDesc->mRefCount = 0;
-    input = mpClientInterface->openInput(profile->mModule->mHandle,
-                                    &inputDesc->mDevice,
-                                    &inputDesc->mSamplingRate,
-                                    &inputDesc->mFormat,
-                                    &inputDesc->mChannelMask);
+    inputDesc->mDevice = device;
+    inputDesc->mSessions.add(session);
 
-    // only accept input with the exact requested set of parameters
-    if (input == 0 ||
-        (samplingRate != inputDesc->mSamplingRate) ||
-        (format != inputDesc->mFormat) ||
-        (channelMask != inputDesc->mChannelMask)) {
-        ALOGI("getInput() failed opening input: samplingRate %d, format %d, channelMask %x",
-                samplingRate, format, channelMask);
-        if (input != 0) {
-            mpClientInterface->closeInput(input);
-        }
-        return 0;
-    }
     addInput(input, inputDesc);
     mpClientInterface->onAudioPortListUpdate();
     return input;
 }
 
-status_t AudioPolicyManager::startInput(audio_io_handle_t input)
+status_t AudioPolicyManager::startInput(audio_io_handle_t input,
+                                        audio_session_t session)
 {
     ALOGV("startInput() input %d", input);
     ssize_t index = mInputs.indexOfKey(input);
@@ -1140,41 +1162,52 @@
     }
     sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
 
-#ifdef AUDIO_POLICY_TEST
-    if (mTestInput == 0)
-#endif //AUDIO_POLICY_TEST
-    {
-        // refuse 2 active AudioRecord clients at the same time except if the active input
-        // uses AUDIO_SOURCE_HOTWORD in which case it is closed.
+    index = inputDesc->mSessions.indexOf(session);
+    if (index < 0) {
+        ALOGW("startInput() unknown session %d on input %d", session, input);
+        return BAD_VALUE;
+    }
+
+    // virtual input devices are compatible with other input devices
+    if (!isVirtualInputDevice(inputDesc->mDevice)) {
+
+        // for a non-virtual input device, check if there is another (non-virtual) active input
         audio_io_handle_t activeInput = getActiveInput();
-        if (!isVirtualInputDevice(inputDesc->mDevice) && activeInput != 0) {
+        if (activeInput != 0 && activeInput != input) {
+
+            // If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
+            // otherwise the active input continues and the new input cannot be started.
             sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
             if (activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) {
-                ALOGW("startInput() preempting already started low-priority input %d", activeInput);
-                stopInput(activeInput);
-                releaseInput(activeInput);
+                ALOGW("startInput(%d) preempting low-priority input %d", input, activeInput);
+                stopInput(activeInput, activeDesc->mSessions.itemAt(0));
+                releaseInput(activeInput, activeDesc->mSessions.itemAt(0));
             } else {
-                ALOGW("startInput() input %d failed: other input already started", input);
+                ALOGE("startInput(%d) failed: other input %d already started", input, activeInput);
                 return INVALID_OPERATION;
             }
         }
     }
 
-    setInputDevice(input, getNewInputDevice(input), true /* force */);
+    if (inputDesc->mRefCount == 0) {
+        setInputDevice(input, getNewInputDevice(input), true /* force */);
 
-    // automatically enable the remote submix output when input is started
-    if (audio_is_remote_submix_device(inputDesc->mDevice)) {
-        setDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                AUDIO_POLICY_DEVICE_STATE_AVAILABLE, AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS);
+        // Automatically enable the remote submix output when input is started.
+        // For remote submix (a virtual device), we open only one input per capture request.
+        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+            setDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                    AUDIO_POLICY_DEVICE_STATE_AVAILABLE, AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS);
+        }
     }
 
     ALOGV("AudioPolicyManager::startInput() input source = %d", inputDesc->mInputSource);
 
-    inputDesc->mRefCount = 1;
+    inputDesc->mRefCount++;
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::stopInput(audio_io_handle_t input)
+status_t AudioPolicyManager::stopInput(audio_io_handle_t input,
+                                       audio_session_t session)
 {
     ALOGV("stopInput() input %d", input);
     ssize_t index = mInputs.indexOfKey(input);
@@ -1184,10 +1217,20 @@
     }
     sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
 
+    index = inputDesc->mSessions.indexOf(session);
+    if (index < 0) {
+        ALOGW("stopInput() unknown session %d on input %d", session, input);
+        return BAD_VALUE;
+    }
+
     if (inputDesc->mRefCount == 0) {
         ALOGW("stopInput() input %d already stopped", input);
         return INVALID_OPERATION;
-    } else {
+    }
+
+    inputDesc->mRefCount--;
+    if (inputDesc->mRefCount == 0) {
+
         // automatically disable the remote submix output when input is stopped
         if (audio_is_remote_submix_device(inputDesc->mDevice)) {
             setDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
@@ -1195,12 +1238,12 @@
         }
 
         resetInputDevice(input);
-        inputDesc->mRefCount = 0;
-        return NO_ERROR;
     }
+    return NO_ERROR;
 }
 
-void AudioPolicyManager::releaseInput(audio_io_handle_t input)
+void AudioPolicyManager::releaseInput(audio_io_handle_t input,
+                                      audio_session_t session)
 {
     ALOGV("releaseInput() %d", input);
     ssize_t index = mInputs.indexOfKey(input);
@@ -1208,6 +1251,25 @@
         ALOGW("releaseInput() releasing unknown input %d", input);
         return;
     }
+    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
+    ALOG_ASSERT(inputDesc != 0);
+
+    index = inputDesc->mSessions.indexOf(session);
+    if (index < 0) {
+        ALOGW("releaseInput() unknown session %d on input %d", session, input);
+        return;
+    }
+    inputDesc->mSessions.remove(session);
+    if (inputDesc->mOpenRefCount == 0) {
+        ALOGW("releaseInput() invalid open ref count %d", inputDesc->mOpenRefCount);
+        return;
+    }
+    inputDesc->mOpenRefCount--;
+    if (inputDesc->mOpenRefCount > 0) {
+        ALOGV("releaseInput() exit > 0");
+        return;
+    }
+
     mpClientInterface->closeInput(input);
     mInputs.removeItem(input);
     nextAudioPortGeneration();
@@ -1871,9 +1933,11 @@
 
         if (!outputDesc->mProfile->isCompatibleProfile(devDesc->mDeviceType,
                                                        patch->sources[0].sample_rate,
+                                                     NULL,  // updatedSamplingRate
                                                      patch->sources[0].format,
                                                      patch->sources[0].channel_mask,
-                                                     AUDIO_OUTPUT_FLAG_NONE)) {
+                                                     AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) {
+            ALOGV("createAudioPatch() profile not supported");
             return INVALID_OPERATION;
         }
         // TODO: reconfigure output format and channels here
@@ -1915,10 +1979,14 @@
             }
 
             if (!inputDesc->mProfile->isCompatibleProfile(devDesc->mDeviceType,
-                                                           patch->sinks[0].sample_rate,
+                                                         patch->sinks[0].sample_rate,
+                                                         NULL, /*updatedSampleRate*/
                                                          patch->sinks[0].format,
                                                          patch->sinks[0].channel_mask,
-                                                         AUDIO_OUTPUT_FLAG_NONE)) {
+                                                         // FIXME for the parameter type,
+                                                         // and the NONE
+                                                         (audio_output_flags_t)
+                                                            AUDIO_INPUT_FLAG_NONE)) {
                 return INVALID_OPERATION;
             }
             // TODO: reconfigure output format and channels here
@@ -1962,9 +2030,20 @@
             srcDeviceDesc->toAudioPortConfig(&newPatch.sources[0], &patch->sources[0]);
             sinkDeviceDesc->toAudioPortConfig(&newPatch.sinks[0], &patch->sinks[0]);
 
-            // TODO: add support for devices on different HW modules
             if (srcDeviceDesc->mModule != sinkDeviceDesc->mModule) {
-                return INVALID_OPERATION;
+                SortedVector<audio_io_handle_t> outputs =
+                                        getOutputsForDevice(sinkDeviceDesc->mDeviceType, mOutputs);
+                // if the sink device is reachable via an opened output stream, request to go via
+                // this output stream by adding a second source to the patch description
+                audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+                if (output != AUDIO_IO_HANDLE_NONE) {
+                    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+                    if (outputDesc->isDuplicated()) {
+                        return INVALID_OPERATION;
+                    }
+                    outputDesc->toAudioPortConfig(&newPatch.sources[1], &patch->sources[0]);
+                    newPatch.num_sources = 2;
+                }
             }
             // TODO: check from routing capabilities in config file and other conflicting patches
 
@@ -2269,25 +2348,39 @@
                 continue;
             }
 
-            audio_devices_t profileTypes = outProfile->mSupportedDevices.types();
-            if ((profileTypes & outputDeviceTypes) &&
+            audio_devices_t profileType = outProfile->mSupportedDevices.types();
+            if ((profileType & mDefaultOutputDevice->mDeviceType) != AUDIO_DEVICE_NONE) {
+                profileType = mDefaultOutputDevice->mDeviceType;
+            } else {
+                profileType = outProfile->mSupportedDevices[0]->mDeviceType;
+            }
+            if ((profileType & outputDeviceTypes) &&
                     ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0)) {
                 sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(outProfile);
 
-                outputDesc->mDevice = (audio_devices_t)(mDefaultOutputDevice->mDeviceType & profileTypes);
-                audio_io_handle_t output = mpClientInterface->openOutput(
-                                                outProfile->mModule->mHandle,
-                                                &outputDesc->mDevice,
-                                                &outputDesc->mSamplingRate,
-                                                &outputDesc->mFormat,
-                                                &outputDesc->mChannelMask,
-                                                &outputDesc->mLatency,
-                                                outputDesc->mFlags);
-                if (output == 0) {
+                outputDesc->mDevice = profileType;
+                audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+                config.sample_rate = outputDesc->mSamplingRate;
+                config.channel_mask = outputDesc->mChannelMask;
+                config.format = outputDesc->mFormat;
+                audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+                status_t status = mpClientInterface->openOutput(outProfile->mModule->mHandle,
+                                                                &output,
+                                                                &config,
+                                                                &outputDesc->mDevice,
+                                                                String8(""),
+                                                                &outputDesc->mLatency,
+                                                                outputDesc->mFlags);
+
+                if (status != NO_ERROR) {
                     ALOGW("Cannot open output stream for device %08x on hw module %s",
                           outputDesc->mDevice,
                           mHwModules[i]->mName);
                 } else {
+                    outputDesc->mSamplingRate = config.sample_rate;
+                    outputDesc->mChannelMask = config.channel_mask;
+                    outputDesc->mFormat = config.format;
+
                     for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
                         audio_devices_t type = outProfile->mSupportedDevices[k]->mDeviceType;
                         ssize_t index =
@@ -2303,7 +2396,6 @@
                         mPrimaryOutput = output;
                     }
                     addOutput(output, outputDesc);
-                    ALOGI("CSTOR setOutputDevice %08x", outputDesc->mDevice);
                     setOutputDevice(output,
                                     outputDesc->mDevice,
                                     true);
@@ -2321,20 +2413,27 @@
                 continue;
             }
 
-            audio_devices_t profileTypes = inProfile->mSupportedDevices.types();
-            if (profileTypes & inputDeviceTypes) {
+            audio_devices_t profileType = inProfile->mSupportedDevices[0]->mDeviceType;
+            if (profileType & inputDeviceTypes) {
                 sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(inProfile);
 
                 inputDesc->mInputSource = AUDIO_SOURCE_MIC;
-                inputDesc->mDevice = inProfile->mSupportedDevices[0]->mDeviceType;
-                audio_io_handle_t input = mpClientInterface->openInput(
-                                                    inProfile->mModule->mHandle,
-                                                    &inputDesc->mDevice,
-                                                    &inputDesc->mSamplingRate,
-                                                    &inputDesc->mFormat,
-                                                    &inputDesc->mChannelMask);
+                inputDesc->mDevice = profileType;
 
-                if (input != 0) {
+                audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+                config.sample_rate = inputDesc->mSamplingRate;
+                config.channel_mask = inputDesc->mChannelMask;
+                config.format = inputDesc->mFormat;
+                audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+                status_t status = mpClientInterface->openInput(inProfile->mModule->mHandle,
+                                                               &input,
+                                                               &config,
+                                                               &inputDesc->mDevice,
+                                                               String8(""),
+                                                               AUDIO_SOURCE_MIC,
+                                                               AUDIO_INPUT_FLAG_NONE);
+
+                if (status == NO_ERROR) {
                     for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
                         audio_devices_t type = inProfile->mSupportedDevices[k]->mDeviceType;
                         ssize_t index =
@@ -2533,17 +2632,25 @@
 
                 sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(NULL);
                 outputDesc->mDevice = AUDIO_DEVICE_OUT_SPEAKER;
-                mPrimaryOutput = mpClientInterface->openOutput(moduleHandle,
-                                                &outputDesc->mDevice,
-                                                &outputDesc->mSamplingRate,
-                                                &outputDesc->mFormat,
-                                                &outputDesc->mChannelMask,
-                                                &outputDesc->mLatency,
-                                                outputDesc->mFlags);
-                if (mPrimaryOutput == 0) {
-                    ALOGE("Failed to reopen hardware output stream, samplingRate: %d, format %d, channels %d",
-                            outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannelMask);
+                audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+                config.sample_rate = outputDesc->mSamplingRate;
+                config.channel_mask = outputDesc->mChannelMask;
+                config.format = outputDesc->mFormat;
+                status_t status = mpClientInterface->openOutput(moduleHandle,
+                                                                &mPrimaryOutput,
+                                                                &config,
+                                                                &outputDesc->mDevice,
+                                                                String8(""),
+                                                                &outputDesc->mLatency,
+                                                                outputDesc->mFlags);
+                if (status != NO_ERROR) {
+                    ALOGE("Failed to reopen hardware output stream, "
+                        "samplingRate: %d, format %d, channels %d",
+                        outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannelMask);
                 } else {
+                    outputDesc->mSamplingRate = config.sample_rate;
+                    outputDesc->mChannelMask = config.channel_mask;
+                    outputDesc->mFormat = config.format;
                     AudioParameter outputCmd = AudioParameter();
                     outputCmd.addInt(String8("set_id"), 0);
                     mpClientInterface->setParameters(mPrimaryOutput, outputCmd.toString());
@@ -2595,12 +2702,31 @@
     nextAudioPortGeneration();
 }
 
-String8 AudioPolicyManager::addressToParameter(audio_devices_t device, const String8 address)
-{
-    if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
-        return String8("a2dp_sink_address=")+address;
+void AudioPolicyManager::findIoHandlesByAddress(sp<AudioOutputDescriptor> desc /*in*/,
+        const String8 address /*in*/,
+        SortedVector<audio_io_handle_t>& outputs /*out*/) {
+    // look for a match on the given address on the addresses of the outputs:
+    // find the address by finding the patch that maps to this output
+    ssize_t patchIdx = mAudioPatches.indexOfKey(desc->mPatchHandle);
+    //ALOGV("    inspecting output %d (patch %d) for supported device=0x%x",
+    //        desc->mIoHandle, patchIdx, desc->mProfile->mSupportedDevices.types());
+    if (patchIdx >= 0) {
+        const sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patchIdx);
+        const int numSinks = patchDesc->mPatch.num_sinks;
+        for (ssize_t j=0; j < numSinks; j++) {
+            if (patchDesc->mPatch.sinks[j].type == AUDIO_PORT_TYPE_DEVICE) {
+                const char* patchAddr =
+                        patchDesc->mPatch.sinks[j].ext.device.address;
+                if (strncmp(patchAddr,
+                        address.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
+                    ALOGV("checkOutputsForDevice(): adding opened output %d on same address %s",
+                            desc->mIoHandle,  patchDesc->mPatch.sinks[j].ext.device.address);
+                    outputs.add(desc->mIoHandle);
+                    break;
+                }
+            }
+        }
     }
-    return address;
 }
 
 status_t AudioPolicyManager::checkOutputsForDevice(audio_devices_t device,
@@ -2615,8 +2741,13 @@
         for (size_t i = 0; i < mOutputs.size(); i++) {
             desc = mOutputs.valueAt(i);
             if (!desc->isDuplicated() && (desc->mProfile->mSupportedDevices.types() & device)) {
-                ALOGV("checkOutputsForDevice(): adding opened output %d", mOutputs.keyAt(i));
-                outputs.add(mOutputs.keyAt(i));
+                if (!deviceDistinguishesOnAddress(device)) {
+                    ALOGV("checkOutputsForDevice(): adding opened output %d", mOutputs.keyAt(i));
+                    outputs.add(mOutputs.keyAt(i));
+                } else {
+                    ALOGV("  checking address match due to device 0x%x", device);
+                    findIoHandlesByAddress(desc, address, outputs);
+                }
             }
         }
         // then look for output profiles that can be routed to this device
@@ -2635,6 +2766,8 @@
             }
         }
 
+        ALOGV("  found %d profiles, %d outputs", profiles.size(), outputs.size());
+
         if (profiles.isEmpty() && outputs.isEmpty()) {
             ALOGW("checkOutputsForDevice(): No output available for device %04x", device);
             return BAD_VALUE;
@@ -2647,36 +2780,45 @@
 
             // nothing to do if one output is already opened for this profile
             size_t j;
-            for (j = 0; j < mOutputs.size(); j++) {
-                desc = mOutputs.valueAt(j);
+            for (j = 0; j < outputs.size(); j++) {
+                desc = mOutputs.valueFor(outputs.itemAt(j));
                 if (!desc->isDuplicated() && desc->mProfile == profile) {
                     break;
                 }
             }
-            if (j != mOutputs.size()) {
+            if (j != outputs.size()) {
                 continue;
             }
 
-            ALOGV("opening output for device %08x with params %s", device, address.string());
+            ALOGV("opening output for device %08x with params %s profile %p",
+                                                      device, address.string(), profile.get());
             desc = new AudioOutputDescriptor(profile);
             desc->mDevice = device;
-            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
-            offloadInfo.sample_rate = desc->mSamplingRate;
-            offloadInfo.format = desc->mFormat;
-            offloadInfo.channel_mask = desc->mChannelMask;
+            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+            config.sample_rate = desc->mSamplingRate;
+            config.channel_mask = desc->mChannelMask;
+            config.format = desc->mFormat;
+            config.offload_info.sample_rate = desc->mSamplingRate;
+            config.offload_info.channel_mask = desc->mChannelMask;
+            config.offload_info.format = desc->mFormat;
+            audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+            status_t status = mpClientInterface->openOutput(profile->mModule->mHandle,
+                                                            &output,
+                                                            &config,
+                                                            &desc->mDevice,
+                                                            address,
+                                                            &desc->mLatency,
+                                                            desc->mFlags);
+            if (status == NO_ERROR) {
+                desc->mSamplingRate = config.sample_rate;
+                desc->mChannelMask = config.channel_mask;
+                desc->mFormat = config.format;
 
-            audio_io_handle_t output = mpClientInterface->openOutput(profile->mModule->mHandle,
-                                                                       &desc->mDevice,
-                                                                       &desc->mSamplingRate,
-                                                                       &desc->mFormat,
-                                                                       &desc->mChannelMask,
-                                                                       &desc->mLatency,
-                                                                       desc->mFlags,
-                                                                       &offloadInfo);
-            if (output != 0) {
                 // Here is where the out_set_parameters() for card & device gets called
                 if (!address.isEmpty()) {
-                    mpClientInterface->setParameters(output, addressToParameter(device, address));
+                    char *param = audio_device_address_to_parameter(device, address);
+                    mpClientInterface->setParameters(output, String8(param));
+                    free(param);
                 }
 
                 // Here is where we step through and resolve any "dynamic" fields
@@ -2685,7 +2827,7 @@
                 if (profile->mSamplingRates[0] == 0) {
                     reply = mpClientInterface->getParameters(output,
                                             String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES));
-                    ALOGV("checkOutputsForDevice() direct output sup sampling rates %s",
+                    ALOGV("checkOutputsForDevice() supported sampling rates %s",
                               reply.string());
                     value = strpbrk((char *)reply.string(), "=");
                     if (value != NULL) {
@@ -2695,7 +2837,7 @@
                 if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
                     reply = mpClientInterface->getParameters(output,
                                                    String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
-                    ALOGV("checkOutputsForDevice() direct output sup formats %s",
+                    ALOGV("checkOutputsForDevice() supported formats %s",
                               reply.string());
                     value = strpbrk((char *)reply.string(), "=");
                     if (value != NULL) {
@@ -2705,7 +2847,7 @@
                 if (profile->mChannelMasks[0] == 0) {
                     reply = mpClientInterface->getParameters(output,
                                                   String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS));
-                    ALOGV("checkOutputsForDevice() direct output sup channel masks %s",
+                    ALOGV("checkOutputsForDevice() supported channel masks %s",
                               reply.string());
                     value = strpbrk((char *)reply.string(), "=");
                     if (value != NULL) {
@@ -2718,33 +2860,38 @@
                          (profile->mFormats.size() < 2)) ||
                      ((profile->mChannelMasks[0] == 0) &&
                          (profile->mChannelMasks.size() < 2))) {
-                    ALOGW("checkOutputsForDevice() direct output missing param");
+                    ALOGW("checkOutputsForDevice() missing param");
                     mpClientInterface->closeOutput(output);
-                    output = 0;
+                    output = AUDIO_IO_HANDLE_NONE;
                 } else if (profile->mSamplingRates[0] == 0 || profile->mFormats[0] == 0 ||
                             profile->mChannelMasks[0] == 0) {
                     mpClientInterface->closeOutput(output);
-                    desc->mSamplingRate = profile->pickSamplingRate();
-                    desc->mFormat = profile->pickFormat();
-                    desc->mChannelMask = profile->pickChannelMask();
-                    offloadInfo.sample_rate = desc->mSamplingRate;
-                    offloadInfo.format = desc->mFormat;
-                    offloadInfo.channel_mask = desc->mChannelMask;
-                    output = mpClientInterface->openOutput(
-                                                    profile->mModule->mHandle,
-                                                    &desc->mDevice,
-                                                    &desc->mSamplingRate,
-                                                    &desc->mFormat,
-                                                    &desc->mChannelMask,
-                                                    &desc->mLatency,
-                                                    desc->mFlags,
-                                                    &offloadInfo);
+                    config.sample_rate = profile->pickSamplingRate();
+                    config.channel_mask = profile->pickChannelMask();
+                    config.format = profile->pickFormat();
+                    config.offload_info.sample_rate = config.sample_rate;
+                    config.offload_info.channel_mask = config.channel_mask;
+                    config.offload_info.format = config.format;
+                    status = mpClientInterface->openOutput(profile->mModule->mHandle,
+                                                           &output,
+                                                           &config,
+                                                           &desc->mDevice,
+                                                           address,
+                                                           &desc->mLatency,
+                                                           desc->mFlags);
+                    if (status == NO_ERROR) {
+                        desc->mSamplingRate = config.sample_rate;
+                        desc->mChannelMask = config.channel_mask;
+                        desc->mFormat = config.format;
+                    } else {
+                        output = AUDIO_IO_HANDLE_NONE;
+                    }
                 }
 
-                if (output != 0) {
+                if (output != AUDIO_IO_HANDLE_NONE) {
                     addOutput(output, desc);
                     if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) {
-                        audio_io_handle_t duplicatedOutput = 0;
+                        audio_io_handle_t duplicatedOutput = AUDIO_IO_HANDLE_NONE;
 
                         // set initial stream volume for device
                         applyStreamVolumes(output, device, 0, true);
@@ -2754,9 +2901,10 @@
                         // open a duplicating output thread for the new output and the primary output
                         duplicatedOutput = mpClientInterface->openDuplicateOutput(output,
                                                                                   mPrimaryOutput);
-                        if (duplicatedOutput != 0) {
+                        if (duplicatedOutput != AUDIO_IO_HANDLE_NONE) {
                             // add duplicated output descriptor
-                            sp<AudioOutputDescriptor> dupOutputDesc = new AudioOutputDescriptor(NULL);
+                            sp<AudioOutputDescriptor> dupOutputDesc =
+                                    new AudioOutputDescriptor(NULL);
                             dupOutputDesc->mOutput1 = mOutputs.valueFor(mPrimaryOutput);
                             dupOutputDesc->mOutput2 = mOutputs.valueFor(output);
                             dupOutputDesc->mSamplingRate = desc->mSamplingRate;
@@ -2771,17 +2919,25 @@
                             mpClientInterface->closeOutput(output);
                             mOutputs.removeItem(output);
                             nextAudioPortGeneration();
-                            output = 0;
+                            output = AUDIO_IO_HANDLE_NONE;
                         }
                     }
                 }
+            } else {
+                output = AUDIO_IO_HANDLE_NONE;
             }
-            if (output == 0) {
+            if (output == AUDIO_IO_HANDLE_NONE) {
                 ALOGW("checkOutputsForDevice() could not open output for device %x", device);
                 profiles.removeAt(profile_index);
                 profile_index--;
             } else {
                 outputs.add(output);
+                if (deviceDistinguishesOnAddress(device)) {
+                    ALOGV("checkOutputsForDevice(): setOutputDevice(dev=0x%x, addr=%s)",
+                            device, address.string());
+                    setOutputDevice(output, device, true/*force*/, 0/*delay*/,
+                            NULL/*patch handle*/, address.string());
+                }
                 ALOGV("checkOutputsForDevice(): adding output %d", output);
             }
         }
@@ -2794,11 +2950,17 @@
         // check if one opened output is not needed any more after disconnecting one device
         for (size_t i = 0; i < mOutputs.size(); i++) {
             desc = mOutputs.valueAt(i);
-            if (!desc->isDuplicated() &&
-                    !(desc->mProfile->mSupportedDevices.types() &
-                            mAvailableOutputDevices.types())) {
-                ALOGV("checkOutputsForDevice(): disconnecting adding output %d", mOutputs.keyAt(i));
-                outputs.add(mOutputs.keyAt(i));
+            if (!desc->isDuplicated()) {
+                if  (!(desc->mProfile->mSupportedDevices.types()
+                        & mAvailableOutputDevices.types())) {
+                    ALOGV("checkOutputsForDevice(): disconnecting adding output %d",
+                            mOutputs.keyAt(i));
+                    outputs.add(mOutputs.keyAt(i));
+                } else if (deviceDistinguishesOnAddress(device) &&
+                        // exact match on device
+                        (desc->mProfile->mSupportedDevices.types() == device)) {
+                    findIoHandlesByAddress(desc, address, outputs);
+                }
             }
         }
         // Clear any profiles associated with the disconnected device.
@@ -2893,16 +3055,28 @@
             ALOGV("opening input for device 0x%X with params %s", device, address.string());
             desc = new AudioInputDescriptor(profile);
             desc->mDevice = device;
+            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+            config.sample_rate = desc->mSamplingRate;
+            config.channel_mask = desc->mChannelMask;
+            config.format = desc->mFormat;
+            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+            status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
+                                                           &input,
+                                                           &config,
+                                                           &desc->mDevice,
+                                                           address,
+                                                           AUDIO_SOURCE_MIC,
+                                                           AUDIO_INPUT_FLAG_NONE /*FIXME*/);
 
-            audio_io_handle_t input = mpClientInterface->openInput(profile->mModule->mHandle,
-                                            &desc->mDevice,
-                                            &desc->mSamplingRate,
-                                            &desc->mFormat,
-                                            &desc->mChannelMask);
+            if (status == NO_ERROR) {
+                desc->mSamplingRate = config.sample_rate;
+                desc->mChannelMask = config.channel_mask;
+                desc->mFormat = config.format;
 
-            if (input != 0) {
                 if (!address.isEmpty()) {
-                    mpClientInterface->setParameters(input, addressToParameter(device, address));
+                    char *param = audio_device_address_to_parameter(device, address);
+                    mpClientInterface->setParameters(input, String8(param));
+                    free(param);
                 }
 
                 // Here is where we step through and resolve any "dynamic" fields
@@ -2942,7 +3116,7 @@
                      ((profile->mChannelMasks[0] == 0) && (profile->mChannelMasks.size() < 2))) {
                     ALOGW("checkInputsForDevice() direct input missing param");
                     mpClientInterface->closeInput(input);
-                    input = 0;
+                    input = AUDIO_IO_HANDLE_NONE;
                 }
 
                 if (input != 0) {
@@ -2950,7 +3124,7 @@
                 }
             } // endif input != 0
 
-            if (input == 0) {
+            if (input == AUDIO_IO_HANDLE_NONE) {
                 ALOGW("checkInputsForDevice() could not open input for device 0x%X", device);
                 profiles.removeAt(profile_index);
                 profile_index--;
@@ -3690,7 +3864,8 @@
                                              audio_devices_t device,
                                              bool force,
                                              int delayMs,
-                                             audio_patch_handle_t *patchHandle)
+                                             audio_patch_handle_t *patchHandle,
+                                             const char* address)
 {
     ALOGV("setOutputDevice() output %d device %04x delayMs %d", output, device, delayMs);
     sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
@@ -3736,7 +3911,9 @@
     if (device == AUDIO_DEVICE_NONE) {
         resetOutputDevice(output, delayMs, NULL);
     } else {
-        DeviceVector deviceList = mAvailableOutputDevices.getDevicesFromType(device);
+        DeviceVector deviceList = (address == NULL) ?
+                mAvailableOutputDevices.getDevicesFromType(device)
+                : mAvailableOutputDevices.getDevicesFromTypeAddr(device, String8(address));
         if (!deviceList.isEmpty()) {
             struct audio_patch patch;
             outputDesc->toAudioPortConfig(&patch.sources[0]);
@@ -3830,6 +4007,11 @@
         if (!deviceList.isEmpty()) {
             struct audio_patch patch;
             inputDesc->toAudioPortConfig(&patch.sinks[0]);
+            // AUDIO_SOURCE_HOTWORD is for internal use only:
+            // handled as AUDIO_SOURCE_VOICE_RECOGNITION by the audio HAL
+            if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD) {
+                patch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_VOICE_RECOGNITION;
+            }
             patch.num_sinks = 1;
             //only one input device for now
             deviceList.itemAt(0)->toAudioPortConfig(&patch.sources[0]);
@@ -3898,9 +4080,10 @@
 }
 
 sp<AudioPolicyManager::IOProfile> AudioPolicyManager::getInputProfile(audio_devices_t device,
-                                                   uint32_t samplingRate,
+                                                   uint32_t& samplingRate,
                                                    audio_format_t format,
-                                                   audio_channel_mask_t channelMask)
+                                                   audio_channel_mask_t channelMask,
+                                                   audio_input_flags_t flags)
 {
     // Choose an input profile based on the requested capture parameters: select the first available
     // profile supporting all requested parameters.
@@ -3914,8 +4097,9 @@
         {
             sp<IOProfile> profile = mHwModules[i]->mInputProfiles[j];
             // profile->log();
-            if (profile->isCompatibleProfile(device, samplingRate, format,
-                                             channelMask, AUDIO_OUTPUT_FLAG_NONE)) {
+            if (profile->isCompatibleProfile(device, samplingRate,
+                                             &samplingRate /*updatedSamplingRate*/,
+                                             format, channelMask, (audio_output_flags_t) flags)) {
                 return profile;
             }
         }
@@ -3994,6 +4178,10 @@
     return false;
 }
 
+bool AudioPolicyManager::deviceDistinguishesOnAddress(audio_devices_t device) {
+    return ((device & APM_AUDIO_DEVICE_MATCH_ADDRESS_ALL) != 0);
+}
+
 audio_io_handle_t AudioPolicyManager::getActiveInput(bool ignoreVirtualInputs)
 {
     for (size_t i = 0; i < mInputs.size(); i++) {
@@ -4256,14 +4444,6 @@
         device = outputDesc->device();
     }
 
-    // if volume is not 0 (not muted), force media volume to max on digital output
-    if (stream == AUDIO_STREAM_MUSIC &&
-        index != mStreams[stream].mIndexMin &&
-        (device == AUDIO_DEVICE_OUT_AUX_DIGITAL ||
-         device == AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET)) {
-        return 1.0;
-    }
-
     volume = volIndexToAmpl(device, streamDesc, index);
 
     // if a headset is connected, apply the following rules to ring tones and notifications
@@ -4522,13 +4702,13 @@
     }
     if (profile != NULL) {
         mAudioPort = profile;
+        mFlags = profile->mFlags;
         mSamplingRate = profile->pickSamplingRate();
         mFormat = profile->pickFormat();
         mChannelMask = profile->pickChannelMask();
         if (profile->mGains.size() > 0) {
             profile->mGains[0]->getDefaultConfig(&mGain);
         }
-        mFlags = profile->mFlags;
     }
 }
 
@@ -4763,6 +4943,9 @@
     result.append(buffer);
     snprintf(buffer, SIZE, " Ref Count %d\n", mRefCount);
     result.append(buffer);
+    snprintf(buffer, SIZE, " Open Ref Count %d\n", mOpenRefCount);
+    result.append(buffer);
+
     write(fd, result.string(), result.size());
 
     return NO_ERROR;
@@ -5222,7 +5405,7 @@
     }
 }
 
-status_t AudioPolicyManager::AudioPort::checkSamplingRate(uint32_t samplingRate) const
+status_t AudioPolicyManager::AudioPort::checkExactSamplingRate(uint32_t samplingRate) const
 {
     for (size_t i = 0; i < mSamplingRates.size(); i ++) {
         if (mSamplingRates[i] == samplingRate) {
@@ -5232,9 +5415,68 @@
     return BAD_VALUE;
 }
 
-status_t AudioPolicyManager::AudioPort::checkChannelMask(audio_channel_mask_t channelMask) const
+status_t AudioPolicyManager::AudioPort::checkCompatibleSamplingRate(uint32_t samplingRate,
+        uint32_t *updatedSamplingRate) const
 {
-    for (size_t i = 0; i < mChannelMasks.size(); i ++) {
+    // Search for the closest supported sampling rate that is above (preferred)
+    // or below (acceptable) the desired sampling rate, within a permitted ratio.
+    // The sampling rates do not need to be sorted in ascending order.
+    ssize_t maxBelow = -1;
+    ssize_t minAbove = -1;
+    uint32_t candidate;
+    for (size_t i = 0; i < mSamplingRates.size(); i++) {
+        candidate = mSamplingRates[i];
+        if (candidate == samplingRate) {
+            if (updatedSamplingRate != NULL) {
+                *updatedSamplingRate = candidate;
+            }
+            return NO_ERROR;
+        }
+        // candidate < desired
+        if (candidate < samplingRate) {
+            if (maxBelow < 0 || candidate > mSamplingRates[maxBelow]) {
+                maxBelow = i;
+            }
+        // candidate > desired
+        } else {
+            if (minAbove < 0 || candidate < mSamplingRates[minAbove]) {
+                minAbove = i;
+            }
+        }
+    }
+    // This uses hard-coded knowledge about AudioFlinger resampling ratios.
+    // TODO Move these assumptions out.
+    static const uint32_t kMaxDownSampleRatio = 6;  // beyond this aliasing occurs
+    static const uint32_t kMaxUpSampleRatio = 256;  // beyond this sample rate inaccuracies occur
+                                                    // due to approximation by an int32_t of the
+                                                    // phase increments
+    // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum.
+    if (minAbove >= 0) {
+        candidate = mSamplingRates[minAbove];
+        if (candidate / kMaxDownSampleRatio <= samplingRate) {
+            if (updatedSamplingRate != NULL) {
+                *updatedSamplingRate = candidate;
+            }
+            return NO_ERROR;
+        }
+    }
+    // But if we have to up-sample from a lower sampling rate, that's OK.
+    if (maxBelow >= 0) {
+        candidate = mSamplingRates[maxBelow];
+        if (candidate * kMaxUpSampleRatio >= samplingRate) {
+            if (updatedSamplingRate != NULL) {
+                *updatedSamplingRate = candidate;
+            }
+            return NO_ERROR;
+        }
+    }
+    // leave updatedSamplingRate unmodified
+    return BAD_VALUE;
+}
+
+status_t AudioPolicyManager::AudioPort::checkExactChannelMask(audio_channel_mask_t channelMask) const
+{
+    for (size_t i = 0; i < mChannelMasks.size(); i++) {
         if (mChannelMasks[i] == channelMask) {
             return NO_ERROR;
         }
@@ -5242,6 +5484,30 @@
     return BAD_VALUE;
 }
 
+status_t AudioPolicyManager::AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask)
+        const
+{
+    const bool isRecordThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK;
+    for (size_t i = 0; i < mChannelMasks.size(); i ++) {
+        // FIXME Does not handle multi-channel automatic conversions yet
+        audio_channel_mask_t supported = mChannelMasks[i];
+        if (supported == channelMask) {
+            return NO_ERROR;
+        }
+        if (isRecordThread) {
+            // This uses hard-coded knowledge that AudioFlinger can silently down-mix and up-mix.
+            // FIXME Abstract this out to a table.
+            if (((supported == AUDIO_CHANNEL_IN_FRONT_BACK || supported == AUDIO_CHANNEL_IN_STEREO)
+                    && channelMask == AUDIO_CHANNEL_IN_MONO) ||
+                (supported == AUDIO_CHANNEL_IN_MONO && (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK
+                    || channelMask == AUDIO_CHANNEL_IN_STEREO))) {
+                return NO_ERROR;
+            }
+        }
+    }
+    return BAD_VALUE;
+}
+
 status_t AudioPolicyManager::AudioPort::checkFormat(audio_format_t format) const
 {
     for (size_t i = 0; i < mFormats.size(); i ++) {
@@ -5310,10 +5576,14 @@
     return channelMask;
 }
 
+/* formats in order of increasing preference */
 const audio_format_t AudioPolicyManager::AudioPort::sPcmFormatCompareTable[] = {
         AUDIO_FORMAT_DEFAULT,
         AUDIO_FORMAT_PCM_16_BIT,
+        AUDIO_FORMAT_PCM_8_24_BIT,
         AUDIO_FORMAT_PCM_24_BIT_PACKED,
+        AUDIO_FORMAT_PCM_32_BIT,
+        AUDIO_FORMAT_PCM_FLOAT,
 };
 
 int AudioPolicyManager::AudioPort::compareFormats(audio_format_t format1,
@@ -5355,12 +5625,14 @@
     }
 
     audio_format_t format = AUDIO_FORMAT_DEFAULT;
-    audio_format_t bestFormat = BEST_MIXER_FORMAT;
+    audio_format_t bestFormat =
+            AudioPolicyManager::AudioPort::sPcmFormatCompareTable[
+                ARRAY_SIZE(AudioPolicyManager::AudioPort::sPcmFormatCompareTable) - 1];
     // For mixed output and inputs, use best mixer output format. Do not
     // limit format otherwise
     if ((mType != AUDIO_PORT_TYPE_MIX) ||
             ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
-             (((mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)) == 0)))) {
+             (((mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)) != 0)))) {
         bestFormat = AUDIO_FORMAT_INVALID;
     }
 
@@ -5435,7 +5707,7 @@
             if (i == 0 && strcmp(formatStr, "") == 0) {
                 snprintf(buffer, SIZE, "Dynamic");
             } else {
-                snprintf(buffer, SIZE, "%-48s", formatStr);
+                snprintf(buffer, SIZE, "%s", formatStr);
             }
             result.append(buffer);
             result.append(i == (mFormats.size() - 1) ? "" : ", ");
@@ -5574,14 +5846,14 @@
         goto exit;
     }
     if (config->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
-        status = mAudioPort->checkSamplingRate(config->sample_rate);
+        status = mAudioPort->checkExactSamplingRate(config->sample_rate);
         if (status != NO_ERROR) {
             goto exit;
         }
         mSamplingRate = config->sample_rate;
     }
     if (config->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
-        status = mAudioPort->checkChannelMask(config->channel_mask);
+        status = mAudioPort->checkExactChannelMask(config->channel_mask);
         if (status != NO_ERROR) {
             goto exit;
         }
@@ -5672,30 +5944,60 @@
 // get a valid a match
 bool AudioPolicyManager::IOProfile::isCompatibleProfile(audio_devices_t device,
                                                             uint32_t samplingRate,
+                                                            uint32_t *updatedSamplingRate,
                                                             audio_format_t format,
                                                             audio_channel_mask_t channelMask,
                                                             audio_output_flags_t flags) const
 {
-    if (samplingRate == 0 || !audio_is_valid_format(format) || channelMask == 0) {
-         return false;
-     }
+    const bool isPlaybackThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SOURCE;
+    const bool isRecordThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK;
+    ALOG_ASSERT(isPlaybackThread != isRecordThread);
 
-     if ((mSupportedDevices.types() & device) != device) {
+    if ((mSupportedDevices.types() & device) != device) {
+        return false;
+    }
+
+    if (samplingRate == 0) {
          return false;
-     }
-     if ((mFlags & flags) != flags) {
+    }
+    uint32_t myUpdatedSamplingRate = samplingRate;
+    if (isPlaybackThread && checkExactSamplingRate(samplingRate) != NO_ERROR) {
          return false;
-     }
-     if (checkSamplingRate(samplingRate) != NO_ERROR) {
+    }
+    if (isRecordThread && checkCompatibleSamplingRate(samplingRate, &myUpdatedSamplingRate) !=
+            NO_ERROR) {
          return false;
-     }
-     if (checkChannelMask(channelMask) != NO_ERROR) {
-         return false;
-     }
-     if (checkFormat(format) != NO_ERROR) {
-         return false;
-     }
-     return true;
+    }
+
+    if (!audio_is_valid_format(format) || checkFormat(format) != NO_ERROR) {
+        return false;
+    }
+
+    if (isPlaybackThread && (!audio_is_output_channel(channelMask) ||
+            checkExactChannelMask(channelMask) != NO_ERROR)) {
+        return false;
+    }
+    if (isRecordThread && (!audio_is_input_channel(channelMask) ||
+            checkCompatibleChannelMask(channelMask) != NO_ERROR)) {
+        return false;
+    }
+
+    if (isPlaybackThread && (mFlags & flags) != flags) {
+        return false;
+    }
+    // The only input flag that is allowed to be different is the fast flag.
+    // An existing fast stream is compatible with a normal track request.
+    // An existing normal stream is compatible with a fast track request,
+    // but the fast request will be denied by AudioFlinger and converted to normal track.
+    if (isRecordThread && (((audio_input_flags_t) mFlags ^ (audio_input_flags_t) flags) &
+            ~AUDIO_INPUT_FLAG_FAST)) {
+        return false;
+    }
+
+    if (updatedSamplingRate != NULL) {
+        *updatedSamplingRate = myUpdatedSamplingRate;
+    }
+    return true;
 }
 
 void AudioPolicyManager::IOProfile::dump(int fd)
@@ -5905,6 +6207,24 @@
     return devices;
 }
 
+AudioPolicyManager::DeviceVector AudioPolicyManager::DeviceVector::getDevicesFromTypeAddr(
+        audio_devices_t type, String8 address) const
+{
+    DeviceVector devices;
+    //ALOGV("   looking for device=%x, addr=%s", type, address.string());
+    for (size_t i = 0; i < size(); i++) {
+        //ALOGV("     at i=%d: device=%x, addr=%s",
+        //        i, itemAt(i)->mDeviceType, itemAt(i)->mAddress.string());
+        if (itemAt(i)->mDeviceType == type) {
+            if (itemAt(i)->mAddress == address) {
+                //ALOGV("      found matching address %s", address.string());
+                devices.add(itemAt(i));
+            }
+        }
+    }
+    return devices;
+}
+
 sp<AudioPolicyManager::DeviceDescriptor> AudioPolicyManager::DeviceVector::getDeviceFromName(
         const String8& name) const
 {
@@ -5940,7 +6260,7 @@
 
 void AudioPolicyManager::DeviceDescriptor::toAudioPort(struct audio_port *port) const
 {
-    ALOGV("DeviceVector::toAudioPort() handle %d type %x", mId, mDeviceType);
+    ALOGV("DeviceDescriptor::toAudioPort() handle %d type %x", mId, mDeviceType);
     AudioPort::toAudioPort(port);
     port->id = mId;
     toAudioPortConfig(&port->active_config);
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index 4caecca..e28a362 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -53,10 +53,7 @@
 #define OFFLOAD_DEFAULT_MIN_DURATION_SECS 60
 
 #define MAX_MIXER_SAMPLING_RATE 48000
-#define MAX_MIXER_CHANNEL_COUNT 2
-// See AudioPort::compareFormats()
-#define WORST_MIXER_FORMAT AUDIO_FORMAT_PCM_16_BIT
-#define BEST_MIXER_FORMAT AUDIO_FORMAT_PCM_24_BIT_PACKED
+#define MAX_MIXER_CHANNEL_COUNT 8
 
 // ----------------------------------------------------------------------------
 // AudioPolicyManager implements audio policy manager behavior common to all platforms.
@@ -107,14 +104,18 @@
                                             uint32_t samplingRate,
                                             audio_format_t format,
                                             audio_channel_mask_t channelMask,
-                                            audio_in_acoustics_t acoustics);
+                                            audio_session_t session,
+                                            audio_input_flags_t flags);
 
         // indicates to the audio policy manager that the input starts being used.
-        virtual status_t startInput(audio_io_handle_t input);
+        virtual status_t startInput(audio_io_handle_t input,
+                                    audio_session_t session);
 
         // indicates to the audio policy manager that the input stops being used.
-        virtual status_t stopInput(audio_io_handle_t input);
-        virtual void releaseInput(audio_io_handle_t input);
+        virtual status_t stopInput(audio_io_handle_t input,
+                                   audio_session_t session);
+        virtual void releaseInput(audio_io_handle_t input,
+                                  audio_session_t session);
         virtual void closeAllInputs();
         virtual void initStreamVolume(audio_stream_type_t stream,
                                                     int indexMin,
@@ -239,8 +240,15 @@
             void loadGain(cnode *root, int index);
             void loadGains(cnode *root);
 
-            status_t checkSamplingRate(uint32_t samplingRate) const;
-            status_t checkChannelMask(audio_channel_mask_t channelMask) const;
+            // searches for an exact match
+            status_t checkExactSamplingRate(uint32_t samplingRate) const;
+            // searches for a compatible match, and returns the best match via updatedSamplingRate
+            status_t checkCompatibleSamplingRate(uint32_t samplingRate,
+                    uint32_t *updatedSamplingRate) const;
+            // searches for an exact match
+            status_t checkExactChannelMask(audio_channel_mask_t channelMask) const;
+            // searches for a compatible match, currently implemented for input channel masks only
+            status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask) const;
             status_t checkFormat(audio_format_t format) const;
             status_t checkGain(const struct audio_gain_config *gainConfig, int index) const;
 
@@ -338,6 +346,8 @@
             DeviceVector getDevicesFromType(audio_devices_t types) const;
             sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
             sp<DeviceDescriptor> getDeviceFromName(const String8& name) const;
+            DeviceVector getDevicesFromTypeAddr(audio_devices_t type, String8 address)
+                    const;
 
         private:
             void refreshTypes();
@@ -355,8 +365,13 @@
             IOProfile(const String8& name, audio_port_role_t role, const sp<HwModule>& module);
             virtual ~IOProfile();
 
+            // This method is used for both output and input.
+            // If parameter updatedSamplingRate is non-NULL, it is assigned the actual sample rate.
+            // For input, flags is interpreted as audio_input_flags_t.
+            // TODO: merge audio_output_flags_t and audio_input_flags_t.
             bool isCompatibleProfile(audio_devices_t device,
                                      uint32_t samplingRate,
+                                     uint32_t *updatedSamplingRate,
                                      audio_format_t format,
                                      audio_channel_mask_t channelMask,
                                      audio_output_flags_t flags) const;
@@ -467,8 +482,10 @@
             audio_devices_t mDevice;                    // current device this input is routed to
             audio_patch_handle_t mPatchHandle;
             uint32_t mRefCount;                         // number of AudioRecord clients using this output
+            uint32_t mOpenRefCount;
             audio_source_t mInputSource;                // input source selected by application (mediarecorder.h)
             const sp<IOProfile> mProfile;                  // I/O profile this output derives from
+            SortedVector<audio_session_t> mSessions;  // audio sessions attached to this input
 
             virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                                    const struct audio_port_config *srcConfig = NULL) const;
@@ -532,7 +549,8 @@
                              audio_devices_t device,
                              bool force = false,
                              int delayMs = 0,
-                             audio_patch_handle_t *patchHandle = NULL);
+                             audio_patch_handle_t *patchHandle = NULL,
+                             const char* address = NULL);
         status_t resetOutputDevice(audio_io_handle_t output,
                                    int delayMs = 0,
                                    audio_patch_handle_t *patchHandle = NULL);
@@ -671,10 +689,12 @@
 
         audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
                                        audio_output_flags_t flags);
+        // samplingRate parameter is an in/out and so may be modified
         sp<IOProfile> getInputProfile(audio_devices_t device,
-                                   uint32_t samplingRate,
+                                   uint32_t& samplingRate,
                                    audio_format_t format,
-                                   audio_channel_mask_t channelMask);
+                                   audio_channel_mask_t channelMask,
+                                   audio_input_flags_t flags);
         sp<IOProfile> getProfileForDirectOutput(audio_devices_t device,
                                                        uint32_t samplingRate,
                                                        audio_format_t format,
@@ -771,11 +791,18 @@
         //    routing of notifications
         void handleNotificationRoutingForStream(audio_stream_type_t stream);
         static bool isVirtualInputDevice(audio_devices_t device);
+        static bool deviceDistinguishesOnAddress(audio_devices_t device);
+        // find the outputs on a given output descriptor that have the given address.
+        // to be called on an AudioOutputDescriptor whose supported devices (as defined
+        //   in mProfile->mSupportedDevices) matches the device whose address is to be matched.
+        // see deviceDistinguishesOnAddress(audio_devices_t) for whether the device type is one
+        //   where addresses are used to distinguish between one connected device and another.
+        void findIoHandlesByAddress(sp<AudioOutputDescriptor> desc /*in*/,
+                const String8 address /*in*/,
+                SortedVector<audio_io_handle_t>& outputs /*out*/);
         uint32_t nextUniqueId();
         uint32_t nextAudioPortGeneration();
         uint32_t curAudioPortGeneration() const { return mAudioPortGeneration; }
-        // converts device address to string sent to audio HAL via setParameters
-        static String8 addressToParameter(audio_devices_t device, const String8 address);
         // internal method to return the output handle for the given device and format
         audio_io_handle_t getOutputForDevice(
                 audio_devices_t device,
diff --git a/services/audiopolicy/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp
old mode 100755
new mode 100644
index ae9cc35..7f14960
--- a/services/audiopolicy/AudioPolicyService.cpp
+++ b/services/audiopolicy/AudioPolicyService.cpp
@@ -514,21 +514,23 @@
                 break;
             }
         }
-        // release delayed commands wake lock
-        if (mAudioCommands.isEmpty()) {
-            release_wake_lock(mName.string());
-        }
         // release mLock before releasing strong reference on the service as
         // AudioPolicyService destructor calls AudioCommandThread::exit() which acquires mLock.
         mLock.unlock();
         svc.clear();
         mLock.lock();
-        if (!exitPending()) {
+        if (!exitPending() && mAudioCommands.isEmpty()) {
+            // release delayed commands wake lock
+            release_wake_lock(mName.string());
             ALOGV("AudioCommandThread() going to sleep");
             mWaitWorkCV.waitRelative(mLock, waitTime);
             ALOGV("AudioCommandThread() waking up");
         }
     }
+    // release delayed commands wake lock before quitting
+    if (!mAudioCommands.isEmpty()) {
+        release_wake_lock(mName.string());
+    }
     mLock.unlock();
     return false;
 }
diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h
old mode 100755
new mode 100644
index 380fd5e..97236e3
--- a/services/audiopolicy/AudioPolicyService.h
+++ b/services/audiopolicy/AudioPolicyService.h
@@ -86,13 +86,17 @@
                                 int session = 0);
     virtual void releaseOutput(audio_io_handle_t output);
     virtual audio_io_handle_t getInput(audio_source_t inputSource,
-                                    uint32_t samplingRate = 0,
-                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                    audio_channel_mask_t channelMask = 0,
-                                    int audioSession = 0);
-    virtual status_t startInput(audio_io_handle_t input);
-    virtual status_t stopInput(audio_io_handle_t input);
-    virtual void releaseInput(audio_io_handle_t input);
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    int audioSession,
+                                    audio_input_flags_t flags);
+    virtual status_t startInput(audio_io_handle_t input,
+                                audio_session_t session);
+    virtual status_t stopInput(audio_io_handle_t input,
+                               audio_session_t session);
+    virtual void releaseInput(audio_io_handle_t input,
+                              audio_session_t session);
     virtual status_t initStreamVolume(audio_stream_type_t stream,
                                       int indexMin,
                                       int indexMax);
@@ -360,14 +364,13 @@
         // in case the audio policy manager has no specific requirements for the output being opened.
         // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream.
         // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
-        virtual audio_io_handle_t openOutput(audio_module_handle_t module,
-                                             audio_devices_t *pDevices,
-                                             uint32_t *pSamplingRate,
-                                             audio_format_t *pFormat,
-                                             audio_channel_mask_t *pChannelMask,
-                                             uint32_t *pLatencyMs,
-                                             audio_output_flags_t flags,
-                                             const audio_offload_info_t *offloadInfo = NULL);
+        virtual status_t openOutput(audio_module_handle_t module,
+                                    audio_io_handle_t *output,
+                                    audio_config_t *config,
+                                    audio_devices_t *devices,
+                                    const String8& address,
+                                    uint32_t *latencyMs,
+                                    audio_output_flags_t flags);
         // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
         // a special mixer thread in the AudioFlinger.
         virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2);
@@ -385,10 +388,12 @@
 
         // opens an audio input
         virtual audio_io_handle_t openInput(audio_module_handle_t module,
-                                            audio_devices_t *pDevices,
-                                            uint32_t *pSamplingRate,
-                                            audio_format_t *pFormat,
-                                            audio_channel_mask_t *pChannelMask);
+                                            audio_io_handle_t *input,
+                                            audio_config_t *config,
+                                            audio_devices_t *devices,
+                                            const String8& address,
+                                            audio_source_t source,
+                                            audio_input_flags_t flags);
         // closes an audio input
         virtual status_t closeInput(audio_io_handle_t input);
         //
@@ -435,6 +440,8 @@
         virtual void onAudioPortListUpdate();
         virtual void onAudioPatchListUpdate();
 
+        virtual audio_unique_id_t newAudioUniqueId();
+
      private:
         AudioPolicyService *mAudioPolicyService;
     };
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 648e82c..7766b90 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -235,7 +235,8 @@
     }
 
     struct camera_info info;
-    status_t rc = mModule->get_camera_info(cameraId, &info);
+    status_t rc = filterGetInfoErrorCode(
+        mModule->get_camera_info(cameraId, &info));
     cameraInfo->facing = info.facing;
     cameraInfo->orientation = info.orientation;
     return rc;
@@ -367,7 +368,7 @@
          * Normal HAL 2.1+ codepath.
          */
         struct camera_info info;
-        ret = mModule->get_camera_info(cameraId, &info);
+        ret = filterGetInfoErrorCode(mModule->get_camera_info(cameraId, &info));
         *cameraInfo = info.static_camera_characteristics;
     }
 
@@ -404,23 +405,28 @@
     return deviceVersion;
 }
 
-bool CameraService::isValidCameraId(int cameraId) {
-    int facing;
-    int deviceVersion = getDeviceVersion(cameraId, &facing);
-
-    switch(deviceVersion) {
-      case CAMERA_DEVICE_API_VERSION_1_0:
-      case CAMERA_DEVICE_API_VERSION_2_0:
-      case CAMERA_DEVICE_API_VERSION_2_1:
-      case CAMERA_DEVICE_API_VERSION_3_0:
-      case CAMERA_DEVICE_API_VERSION_3_1:
-      case CAMERA_DEVICE_API_VERSION_3_2:
-        return true;
-      default:
-        return false;
+status_t CameraService::filterOpenErrorCode(status_t err) {
+    switch(err) {
+        case NO_ERROR:
+        case -EBUSY:
+        case -EINVAL:
+        case -EUSERS:
+            return err;
+        default:
+            break;
     }
+    return -ENODEV;
+}
 
-    return false;
+status_t CameraService::filterGetInfoErrorCode(status_t err) {
+    switch(err) {
+        case NO_ERROR:
+        case -EINVAL:
+            return err;
+        default:
+            break;
+    }
+    return -ENODEV;
 }
 
 bool CameraService::setUpVendorTags() {
@@ -665,14 +671,6 @@
     int facing = -1;
     int deviceVersion = getDeviceVersion(cameraId, &facing);
 
-    // If there are other non-exclusive users of the camera,
-    //  this will tear them down before we can reuse the camera
-    if (isValidCameraId(cameraId)) {
-        // transition from PRESENT -> NOT_AVAILABLE
-        updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
-                     cameraId);
-    }
-
     if (halVersion < 0 || halVersion == deviceVersion) {
         // Default path: HAL version is unspecified by caller, create CameraClient
         // based on device version reported by the HAL.
@@ -719,8 +717,6 @@
     status_t status = connectFinishUnsafe(client, client->getRemote());
     if (status != OK) {
         // this is probably not recoverable.. maybe the client can try again
-        // OK: we can only get here if we were originally in PRESENT state
-        updateStatus(ICameraServiceListener::STATUS_PRESENT, cameraId);
         return status;
     }
 
@@ -970,14 +966,6 @@
         int facing = -1;
         int deviceVersion = getDeviceVersion(cameraId, &facing);
 
-        // If there are other non-exclusive users of the camera,
-        //  this will tear them down before we can reuse the camera
-        if (isValidCameraId(cameraId)) {
-            // transition from PRESENT -> NOT_AVAILABLE
-            updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
-                         cameraId);
-        }
-
         switch(deviceVersion) {
           case CAMERA_DEVICE_API_VERSION_1_0:
             ALOGW("Camera using old HAL version: %d", deviceVersion);
@@ -1002,8 +990,6 @@
         status_t status = connectFinishUnsafe(client, client->getRemote());
         if (status != OK) {
             // this is probably not recoverable.. maybe the client can try again
-            // OK: we can only get here if we were originally in PRESENT state
-            updateStatus(ICameraServiceListener::STATUS_PRESENT, cameraId);
             return status;
         }
 
@@ -1427,13 +1413,15 @@
 void CameraService::BasicClient::disconnect() {
     ALOGV("BasicClient::disconnect");
     mCameraService->removeClientByRemote(mRemoteBinder);
+
+    finishCameraOps();
     // client shouldn't be able to call into us anymore
     mClientPid = 0;
 }
 
 status_t CameraService::BasicClient::startCameraOps() {
     int32_t res;
-
+    // Notify app ops that the camera is not available
     mOpsCallback = new OpsCallback(this);
 
     {
@@ -1451,16 +1439,39 @@
                 mCameraId, String8(mClientPackageName).string());
         return PERMISSION_DENIED;
     }
+
     mOpsActive = true;
+
+    // Transition device availability listeners from PRESENT -> NOT_AVAILABLE
+    mCameraService->updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
+            mCameraId);
+
     return OK;
 }
 
 status_t CameraService::BasicClient::finishCameraOps() {
+    // Check if startCameraOps succeeded, and if so, finish the camera op
     if (mOpsActive) {
+        // Notify app ops that the camera is available again
         mAppOpsManager.finishOp(AppOpsManager::OP_CAMERA, mClientUid,
                 mClientPackageName);
         mOpsActive = false;
+
+        // Notify device availability listeners that this camera is available
+        // again
+
+        StatusVector rejectSourceStates;
+        rejectSourceStates.push_back(ICameraServiceListener::STATUS_NOT_PRESENT);
+        rejectSourceStates.push_back(ICameraServiceListener::STATUS_ENUMERATING);
+
+        // Transition to PRESENT if the camera is not in either of above 2
+        // states
+        mCameraService->updateStatus(ICameraServiceListener::STATUS_PRESENT,
+                mCameraId,
+                &rejectSourceStates);
+
     }
+    // Always stop watching, even if no camera op is active
     mAppOpsManager.stopWatchingMode(mOpsCallback);
     mOpsCallback.clear();
 
@@ -1531,15 +1542,6 @@
     ALOGV("Client::disconnect");
     BasicClient::disconnect();
     mCameraService->setCameraFree(mCameraId);
-
-    StatusVector rejectSourceStates;
-    rejectSourceStates.push_back(ICameraServiceListener::STATUS_NOT_PRESENT);
-    rejectSourceStates.push_back(ICameraServiceListener::STATUS_ENUMERATING);
-
-    // Transition to PRESENT if the camera is not in either of above 2 states
-    mCameraService->updateStatus(ICameraServiceListener::STATUS_PRESENT,
-                                 mCameraId,
-                                 &rejectSourceStates);
 }
 
 CameraService::Client::OpsCallback::OpsCallback(wp<BasicClient> client):
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 28590eb..cb98c96 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -138,6 +138,10 @@
     // CameraDeviceFactory functionality
     int                 getDeviceVersion(int cameraId, int* facing = NULL);
 
+    /////////////////////////////////////////////////////////////////////
+    // Shared utilities
+    static status_t     filterOpenErrorCode(status_t err);
+    static status_t     filterGetInfoErrorCode(status_t err);
 
     /////////////////////////////////////////////////////////////////////
     // CameraClient functionality
@@ -149,20 +153,19 @@
 
     class BasicClient : public virtual RefBase {
     public:
-        virtual status_t initialize(camera_module_t *module) = 0;
-
-        virtual void          disconnect() = 0;
+        virtual status_t    initialize(camera_module_t *module) = 0;
+        virtual void        disconnect();
 
         // because we can't virtually inherit IInterface, which breaks
         // virtual inheritance
         virtual sp<IBinder> asBinderWrapper() = 0;
 
         // Return the remote callback binder object (e.g. IProCameraCallbacks)
-        sp<IBinder>     getRemote() {
+        sp<IBinder>         getRemote() {
             return mRemoteBinder;
         }
 
-        virtual status_t      dump(int fd, const Vector<String16>& args) = 0;
+        virtual status_t    dump(int fd, const Vector<String16>& args) = 0;
 
     protected:
         BasicClient(const sp<CameraService>& cameraService,
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 1642896..9721e13 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -906,6 +906,13 @@
                 ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
                         __FUNCTION__, mCameraId, strerror(-res), res);
             }
+            // Clean up recording stream
+            res = mStreamingProcessor->deleteRecordingStream();
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete recording stream before "
+                        "stop preview: %s (%d)",
+                        __FUNCTION__, mCameraId, strerror(-res), res);
+            }
             // no break
         case Parameters::WAITING_FOR_PREVIEW_WINDOW: {
             SharedParameters::Lock l(mParameters);
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 3de5d90..312a78c 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -40,7 +40,12 @@
 
     {
         SharedParameters::Lock l(client->getParameters());
-        mUsePartialQuirk = l.mParameters.quirks.partialResults;
+
+        if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+            mUsePartialResult = (mNumPartialResults > 1);
+        } else {
+            mUsePartialResult = l.mParameters.quirks.partialResults;
+        }
 
         // Initialize starting 3A state
         m3aState.afTriggerId = l.mParameters.afTriggerCounter;
@@ -63,17 +68,21 @@
         return false;
     }
 
-    bool partialResult = false;
-    if (mUsePartialQuirk) {
-        camera_metadata_entry_t entry;
-        entry = frame.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
-        if (entry.count > 0 &&
-                entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
-            partialResult = true;
+    bool isPartialResult = false;
+    if (mUsePartialResult) {
+        if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+            isPartialResult = frame.mResultExtras.partialResultCount < mNumPartialResults;
+        } else {
+            camera_metadata_entry_t entry;
+            entry = frame.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+            if (entry.count > 0 &&
+                    entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+                isPartialResult = true;
+            }
         }
     }
 
-    if (!partialResult && processFaceDetect(frame.mMetadata, client) != OK) {
+    if (!isPartialResult && processFaceDetect(frame.mMetadata, client) != OK) {
         return false;
     }
 
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 4afca50..68cf55b 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -91,8 +91,8 @@
         }
     } m3aState;
 
-    // Whether the partial result quirk is enabled for this device
-    bool mUsePartialQuirk;
+    // Whether the partial result is enabled for this device
+    bool mUsePartialResult;
 
     // Track most recent frame number for which 3A notifications were sent for.
     // Used to filter against sending 3A notifications for the same frame
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index 79f75a5..ab61c44 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -94,14 +94,14 @@
     entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
     nsecs_t timestamp = entry.data.i64[0];
     if (entry.count == 0) {
-        ALOGE("%s: metadata doesn't have timestamp, skip this result");
+        ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__);
         return;
     }
     (void)timestamp;
 
     entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
     if (entry.count == 0) {
-        ALOGE("%s: metadata doesn't have frame number, skip this result");
+        ALOGE("%s: metadata doesn't have frame number, skip this result", __FUNCTION__);
         return;
     }
     int32_t frameNumber = entry.data.i32[0];
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 13c9f48..24d173c 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -112,8 +112,6 @@
 
     TClientBase::mDestructionStarted = true;
 
-    TClientBase::finishCameraOps();
-
     disconnect();
 
     ALOGI("Closed Camera %d", TClientBase::mCameraId);
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index c7bd886..037695d 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -252,6 +252,10 @@
      */
     virtual status_t flush(int64_t *lastFrameNumber = NULL) = 0;
 
+    /**
+     * Get the HAL device version.
+     */
+    virtual uint32_t getDeviceVersion() = 0;
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index 482f687..29eb78f 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -29,7 +29,17 @@
 
 FrameProcessorBase::FrameProcessorBase(wp<CameraDeviceBase> device) :
     Thread(/*canCallJava*/false),
-    mDevice(device) {
+    mDevice(device),
+    mNumPartialResults(1) {
+    sp<CameraDeviceBase> cameraDevice = device.promote();
+    if (cameraDevice != 0 &&
+            cameraDevice->getDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+        CameraMetadata staticInfo = cameraDevice->info();
+        camera_metadata_entry_t entry = staticInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+        if (entry.count > 0) {
+            mNumPartialResults = entry.data.i32[0];
+        }
+    }
 }
 
 FrameProcessorBase::~FrameProcessorBase() {
@@ -160,14 +170,18 @@
 
     camera_metadata_ro_entry_t entry;
 
-    // Quirks: Don't deliver partial results to listeners that don't want them
-    bool quirkIsPartial = false;
-    entry = result.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
-    if (entry.count != 0 &&
-            entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
-        ALOGV("%s: Camera %d: Not forwarding partial result to listeners",
-                __FUNCTION__, device->getId());
-        quirkIsPartial = true;
+    // Check if this result is partial.
+    bool isPartialResult = false;
+    if (device->getDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+        isPartialResult = result.mResultExtras.partialResultCount < mNumPartialResults;
+    } else {
+        entry = result.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+        if (entry.count != 0 &&
+                entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+            ALOGV("%s: Camera %d: This is a partial result",
+                    __FUNCTION__, device->getId());
+            isPartialResult = true;
+        }
     }
 
     // TODO: instead of getting requestID from CameraMetadata, we should get it
@@ -186,9 +200,10 @@
         Mutex::Autolock l(mInputMutex);
 
         List<RangeListener>::iterator item = mRangeListeners.begin();
+        // Don't deliver partial results to listeners that don't want them
         while (item != mRangeListeners.end()) {
             if (requestId >= item->minId && requestId < item->maxId &&
-                    (!quirkIsPartial || item->sendPartials)) {
+                    (!isPartialResult || item->sendPartials)) {
                 sp<FilteredListener> listener = item->listener.promote();
                 if (listener == 0) {
                     item = mRangeListeners.erase(item);
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.h b/services/camera/libcameraservice/common/FrameProcessorBase.h
index 3649c45..a618d84 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.h
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.h
@@ -71,6 +71,9 @@
     };
     List<RangeListener> mRangeListeners;
 
+    // Number of partial results the HAL will potentially send.
+    int32_t mNumPartialResults;
+
     void processNewFrames(const sp<CameraDeviceBase> &device);
 
     virtual bool processSingleFrame(CaptureResult &result,
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 2746f6f..6386838 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -105,8 +105,8 @@
                                                CAMERA_DEVICE_API_VERSION_1_0,
                                                (hw_device_t **)&mDevice);
         } else {
-            rc = module->methods->open(module, mName.string(),
-                                           (hw_device_t **)&mDevice);
+            rc = CameraService::filterOpenErrorCode(module->methods->open(
+                module, mName.string(), (hw_device_t **)&mDevice));
         }
         if (rc != OK) {
             ALOGE("Could not open camera %s: %d", mName.string(), rc);
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index c33c166..8c2520e 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -30,6 +30,7 @@
 #include <utils/Trace.h>
 #include <utils/Timers.h>
 #include "Camera2Device.h"
+#include "CameraService.h"
 
 namespace android {
 
@@ -67,8 +68,8 @@
 
     camera2_device_t *device;
 
-    res = module->common.methods->open(&module->common, name,
-            reinterpret_cast<hw_device_t**>(&device));
+    res = CameraService::filterOpenErrorCode(module->common.methods->open(
+        &module->common, name, reinterpret_cast<hw_device_t**>(&device)));
 
     if (res != OK) {
         ALOGE("%s: Could not open camera %d: %s (%d)", __FUNCTION__,
@@ -123,6 +124,7 @@
 
     mDeviceInfo = info.static_camera_characteristics;
     mHal2Device = device;
+    mDeviceVersion = device->common.version;
 
     return OK;
 }
@@ -589,6 +591,11 @@
     return waitUntilDrained();
 }
 
+uint32_t Camera2Device::getDeviceVersion() {
+    ATRACE_CALL();
+    return mDeviceVersion;
+}
+
 /**
  * Camera2Device::MetadataQueue
  */
@@ -1081,25 +1088,33 @@
     }
 
     if (mFormat == HAL_PIXEL_FORMAT_BLOB) {
-        res = native_window_set_buffers_geometry(mConsumerInterface.get(),
-                mSize, 1, mFormat);
+        res = native_window_set_buffers_dimensions(mConsumerInterface.get(),
+                mSize, 1);
         if (res != OK) {
-            ALOGE("%s: Unable to configure compressed stream buffer geometry"
+            ALOGE("%s: Unable to configure compressed stream buffer dimensions"
                     " %d x %d, size %zu for stream %d",
                     __FUNCTION__, mWidth, mHeight, mSize, mId);
             return res;
         }
     } else {
-        res = native_window_set_buffers_geometry(mConsumerInterface.get(),
-                mWidth, mHeight, mFormat);
+        res = native_window_set_buffers_dimensions(mConsumerInterface.get(),
+                mWidth, mHeight);
         if (res != OK) {
-            ALOGE("%s: Unable to configure stream buffer geometry"
-                    " %d x %d, format 0x%x for stream %d",
-                    __FUNCTION__, mWidth, mHeight, mFormat, mId);
+            ALOGE("%s: Unable to configure stream buffer dimensions"
+                    " %d x %d for stream %d",
+                    __FUNCTION__, mWidth, mHeight, mId);
             return res;
         }
     }
 
+    res = native_window_set_buffers_format(mConsumerInterface.get(), mFormat);
+    if (res != OK) {
+        ALOGE("%s: Unable to configure stream buffer format"
+                " %#x for stream %d",
+                __FUNCTION__, mFormat, mId);
+        return res;
+    }
+
     int maxConsumerBuffers;
     res = mConsumerInterface->query(mConsumerInterface.get(),
             NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index 22a13ac..46182f8 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -78,12 +78,16 @@
             buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
     // Flush implemented as just a wait
     virtual status_t flush(int64_t *lastFrameNumber = NULL);
+    virtual uint32_t getDeviceVersion();
+
   private:
     const int mId;
     camera2_device_t *mHal2Device;
 
     CameraMetadata mDeviceInfo;
 
+    uint32_t mDeviceVersion;
+
     /**
      * Queue class for both sending requests to a camera2 device, and for
      * receiving frames from a camera2 device.
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6ceb9d4..a6214cc 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -48,6 +48,7 @@
 #include "device3/Camera3OutputStream.h"
 #include "device3/Camera3InputStream.h"
 #include "device3/Camera3ZslStream.h"
+#include "CameraService.h"
 
 using namespace android::camera3;
 
@@ -57,7 +58,8 @@
         mId(id),
         mHal3Device(NULL),
         mStatus(STATUS_UNINITIALIZED),
-        mUsePartialResultQuirk(false),
+        mUsePartialResult(false),
+        mNumPartialResults(1),
         mNextResultFrameNumber(0),
         mNextShutterFrameNumber(0),
         mListener(NULL)
@@ -103,8 +105,9 @@
     camera3_device_t *device;
 
     ATRACE_BEGIN("camera3->open");
-    res = module->common.methods->open(&module->common, deviceName.string(),
-            reinterpret_cast<hw_device_t**>(&device));
+    res = CameraService::filterOpenErrorCode(module->common.methods->open(
+        &module->common, deviceName.string(),
+        reinterpret_cast<hw_device_t**>(&device)));
     ATRACE_END();
 
     if (res != OK) {
@@ -123,7 +126,8 @@
     }
 
     camera_info info;
-    res = module->get_camera_info(mId, &info);
+    res = CameraService::filterGetInfoErrorCode(module->get_camera_info(
+        mId, &info));
     if (res != OK) return res;
 
     if (info.device_version != device->common.version) {
@@ -180,13 +184,20 @@
     mNeedConfig = true;
     mPauseStateNotify = false;
 
-    /** Check for quirks */
-
     // Will the HAL be sending in early partial result metadata?
-    camera_metadata_entry partialResultsQuirk =
-            mDeviceInfo.find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
-    if (partialResultsQuirk.count > 0 && partialResultsQuirk.data.u8[0] == 1) {
-        mUsePartialResultQuirk = true;
+    if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+        camera_metadata_entry partialResultsCount =
+                mDeviceInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+        if (partialResultsCount.count > 0) {
+            mNumPartialResults = partialResultsCount.data.i32[0];
+            mUsePartialResult = (mNumPartialResults > 1);
+        }
+    } else {
+        camera_metadata_entry partialResultsQuirk =
+                mDeviceInfo.find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
+        if (partialResultsQuirk.count > 0 && partialResultsQuirk.data.u8[0] == 1) {
+            mUsePartialResult = true;
+        }
     }
 
     return OK;
@@ -1267,6 +1278,12 @@
     return res;
 }
 
+uint32_t Camera3Device::getDeviceVersion() {
+    ATRACE_CALL();
+    Mutex::Autolock il(mInterfaceLock);
+    return mDeviceVersion;
+}
+
 /**
  * Methods called by subclasses
  */
@@ -1483,6 +1500,9 @@
 
     ALOGV("%s: Camera %d: Stream configuration complete", __FUNCTION__, mId);
 
+    // tear down the deleted streams after configure streams.
+    mDeletedStreams.clear();
+
     return OK;
 }
 
@@ -1545,11 +1565,10 @@
 }
 
 /**
- * QUIRK(partial results)
  * Check if all 3A fields are ready, and send off a partial 3A-only result
  * to the output frame queue
  */
-bool Camera3Device::processPartial3AQuirk(
+bool Camera3Device::processPartial3AResult(
         uint32_t frameNumber,
         const CameraMetadata& partial, const CaptureResultExtras& resultExtras) {
 
@@ -1601,7 +1620,7 @@
     // In addition to the above fields, this means adding in
     //   android.request.frameCount
     //   android.request.requestId
-    //   android.quirks.partialResult
+    //   android.quirks.partialResult (for HAL version below HAL3.2)
 
     const size_t kMinimal3AResultEntries = 10;
 
@@ -1627,10 +1646,12 @@
         return false;
     }
 
-    static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
-    if (!insert3AResult(min3AResult.mMetadata, ANDROID_QUIRKS_PARTIAL_RESULT,
-            &partialResult, frameNumber)) {
-        return false;
+    if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+        static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
+        if (!insert3AResult(min3AResult.mMetadata, ANDROID_QUIRKS_PARTIAL_RESULT,
+                &partialResult, frameNumber)) {
+            return false;
+        }
     }
 
     if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_MODE,
@@ -1668,6 +1689,9 @@
         return false;
     }
 
+    // We only send the aggregated partial when all 3A related metadata are available
+    // For both API1 and API2.
+    // TODO: we probably should pass through all partials to API2 unconditionally.
     mResultSignal.signal();
 
     return true;
@@ -1726,8 +1750,21 @@
                 frameNumber);
         return;
     }
-    bool partialResultQuirk = false;
-    CameraMetadata collectedQuirkResult;
+
+    // For HAL3.2 or above, if HAL doesn't support partial, it must always set
+    // partial_result to 1 when metadata is included in this result.
+    if (!mUsePartialResult &&
+            mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2 &&
+            result->result != NULL &&
+            result->partial_result != 1) {
+        SET_ERR("Result is malformed for frame %d: partial_result %u must be 1"
+                " if partial result is not supported",
+                frameNumber, result->partial_result);
+        return;
+    }
+
+    bool isPartialResult = false;
+    CameraMetadata collectedPartialResult;
     CaptureResultExtras resultExtras;
     bool hasInputBufferInRequest = false;
 
@@ -1749,28 +1786,47 @@
                 ", burstId = %" PRId32,
                 __FUNCTION__, request.resultExtras.requestId, request.resultExtras.frameNumber,
                 request.resultExtras.burstId);
+        // Always update the partial count to the latest one. When framework aggregates adjacent
+        // partial results into one, the latest partial count will be used.
+        request.resultExtras.partialResultCount = result->partial_result;
 
         // Check if this result carries only partial metadata
-        if (mUsePartialResultQuirk && result->result != NULL) {
-            camera_metadata_ro_entry_t partialResultEntry;
-            res = find_camera_metadata_ro_entry(result->result,
-                    ANDROID_QUIRKS_PARTIAL_RESULT, &partialResultEntry);
-            if (res != NAME_NOT_FOUND &&
-                    partialResultEntry.count > 0 &&
-                    partialResultEntry.data.u8[0] ==
-                    ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
-                // A partial result. Flag this as such, and collect this
-                // set of metadata into the in-flight entry.
-                partialResultQuirk = true;
-                request.partialResultQuirk.collectedResult.append(
-                    result->result);
-                request.partialResultQuirk.collectedResult.erase(
-                    ANDROID_QUIRKS_PARTIAL_RESULT);
+        if (mUsePartialResult && result->result != NULL) {
+            if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+                if (result->partial_result > mNumPartialResults || result->partial_result < 1) {
+                    SET_ERR("Result is malformed for frame %d: partial_result %u must be  in"
+                            " the range of [1, %d] when metadata is included in the result",
+                            frameNumber, result->partial_result, mNumPartialResults);
+                    return;
+                }
+                isPartialResult = (result->partial_result < mNumPartialResults);
+                if (isPartialResult) {
+                    request.partialResult.collectedResult.append(result->result);
+                }
+            } else {
+                camera_metadata_ro_entry_t partialResultEntry;
+                res = find_camera_metadata_ro_entry(result->result,
+                        ANDROID_QUIRKS_PARTIAL_RESULT, &partialResultEntry);
+                if (res != NAME_NOT_FOUND &&
+                        partialResultEntry.count > 0 &&
+                        partialResultEntry.data.u8[0] ==
+                        ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+                    // A partial result. Flag this as such, and collect this
+                    // set of metadata into the in-flight entry.
+                    isPartialResult = true;
+                    request.partialResult.collectedResult.append(
+                        result->result);
+                    request.partialResult.collectedResult.erase(
+                        ANDROID_QUIRKS_PARTIAL_RESULT);
+                }
+            }
+
+            if (isPartialResult) {
                 // Fire off a 3A-only result if possible
-                if (!request.partialResultQuirk.haveSent3A) {
-                    request.partialResultQuirk.haveSent3A =
-                            processPartial3AQuirk(frameNumber,
-                                    request.partialResultQuirk.collectedResult,
+                if (!request.partialResult.haveSent3A) {
+                    request.partialResult.haveSent3A =
+                            processPartial3AResult(frameNumber,
+                                    request.partialResult.collectedResult,
                                     request.resultExtras);
                 }
             }
@@ -1786,23 +1842,23 @@
          * - CAMERA3_MSG_SHUTTER (expected during normal operation)
          * - CAMERA3_MSG_ERROR (expected during flush)
          */
-        if (request.requestStatus == OK && timestamp == 0 && !partialResultQuirk) {
+        if (request.requestStatus == OK && timestamp == 0 && !isPartialResult) {
             SET_ERR("Called before shutter notify for frame %d",
                     frameNumber);
             return;
         }
 
         // Did we get the (final) result metadata for this capture?
-        if (result->result != NULL && !partialResultQuirk) {
+        if (result->result != NULL && !isPartialResult) {
             if (request.haveResultMetadata) {
                 SET_ERR("Called multiple times with metadata for frame %d",
                         frameNumber);
                 return;
             }
-            if (mUsePartialResultQuirk &&
-                    !request.partialResultQuirk.collectedResult.isEmpty()) {
-                collectedQuirkResult.acquire(
-                    request.partialResultQuirk.collectedResult);
+            if (mUsePartialResult &&
+                    !request.partialResult.collectedResult.isEmpty()) {
+                collectedPartialResult.acquire(
+                    request.partialResult.collectedResult);
             }
             request.haveResultMetadata = true;
         }
@@ -1842,7 +1898,7 @@
 
     // Process the result metadata, if provided
     bool gotResult = false;
-    if (result->result != NULL && !partialResultQuirk) {
+    if (result->result != NULL && !isPartialResult) {
         Mutex::Autolock l(mOutputLock);
 
         gotResult = true;
@@ -1871,8 +1927,8 @@
         }
 
         // Append any previous partials to form a complete result
-        if (mUsePartialResultQuirk && !collectedQuirkResult.isEmpty()) {
-            captureResult.mMetadata.append(collectedQuirkResult);
+        if (mUsePartialResult && !collectedPartialResult.isEmpty()) {
+            captureResult.mMetadata.append(collectedPartialResult);
         }
 
         captureResult.mMetadata.sort();
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index ea958b7..b1b0033 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -135,6 +135,8 @@
 
     virtual status_t flush(int64_t *lastFrameNumber = NULL);
 
+    virtual uint32_t getDeviceVersion();
+
     // Methods called by subclasses
     void             notifyStatus(bool idle); // updates from StatusTracker
 
@@ -168,7 +170,7 @@
 
     CameraMetadata             mDeviceInfo;
 
-    int                        mDeviceVersion;
+    uint32_t                   mDeviceVersion;
 
     enum Status {
         STATUS_ERROR,
@@ -199,8 +201,11 @@
     // Need to hold on to stream references until configure completes.
     Vector<sp<camera3::Camera3StreamInterface> > mDeletedStreams;
 
-    // Whether quirk ANDROID_QUIRKS_USE_PARTIAL_RESULT is enabled
-    bool                       mUsePartialResultQuirk;
+    // Whether the HAL will send partial result
+    bool                       mUsePartialResult;
+
+    // Number of partial results that will be delivered by the HAL.
+    uint32_t                   mNumPartialResults;
 
     /**** End scope for mLock ****/
 
@@ -507,17 +512,17 @@
         // If this request has any input buffer
         bool hasInputBuffer;
 
-        // Fields used by the partial result quirk only
-        struct PartialResultQuirkInFlight {
+        // Fields used by the partial result only
+        struct PartialResultInFlight {
             // Set by process_capture_result once 3A has been sent to clients
             bool    haveSent3A;
             // Result metadata collected so far, when partial results are in use
             CameraMetadata collectedResult;
 
-            PartialResultQuirkInFlight():
+            PartialResultInFlight():
                     haveSent3A(false) {
             }
-        } partialResultQuirk;
+        } partialResult;
 
         // Default constructor needed by KeyedVector
         InFlightRequest() :
@@ -564,11 +569,11 @@
             int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput);
 
     /**
-     * For the partial result quirk, check if all 3A state fields are available
+     * For the partial result, check if all 3A state fields are available
      * and if so, queue up 3A-only result to the client. Returns true if 3A
      * is sent.
      */
-    bool processPartial3AQuirk(uint32_t frameNumber,
+    bool processPartial3AResult(uint32_t frameNumber,
             const CameraMetadata& partial, const CaptureResultExtras& resultExtras);
 
     // Helpers for reading and writing 3A metadata into to/from partial results
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 7ec649b..169eb82 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -289,20 +289,25 @@
 
     if (mMaxSize == 0) {
         // For buffers of known size
-        res = native_window_set_buffers_geometry(mConsumer.get(),
-                camera3_stream::width, camera3_stream::height,
-                camera3_stream::format);
+        res = native_window_set_buffers_dimensions(mConsumer.get(),
+                camera3_stream::width, camera3_stream::height);
     } else {
         // For buffers with bounded size
-        res = native_window_set_buffers_geometry(mConsumer.get(),
-                mMaxSize, 1,
-                camera3_stream::format);
+        res = native_window_set_buffers_dimensions(mConsumer.get(),
+                mMaxSize, 1);
     }
     if (res != OK) {
-        ALOGE("%s: Unable to configure stream buffer geometry"
-                " %d x %d, format %x for stream %d",
+        ALOGE("%s: Unable to configure stream buffer dimensions"
+                " %d x %d (maxSize %zu) for stream %d",
                 __FUNCTION__, camera3_stream::width, camera3_stream::height,
-                camera3_stream::format, mId);
+                mMaxSize, mId);
+        return res;
+    }
+    res = native_window_set_buffers_format(mConsumer.get(),
+            camera3_stream::format);
+    if (res != OK) {
+        ALOGE("%s: Unable to configure stream buffer format %#x for stream %d",
+                __FUNCTION__, camera3_stream::format, mId);
         return res;
     }
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 6cbb9f4..f963326 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -76,6 +76,8 @@
             /*out*/
             sp<Fence> *releaseFenceOut);
 
+    virtual status_t disconnectLocked();
+
     sp<ANativeWindow> mConsumer;
   private:
     int               mTransform;
@@ -91,7 +93,6 @@
             nsecs_t timestamp);
 
     virtual status_t configureQueueLocked();
-    virtual status_t disconnectLocked();
 
     virtual status_t getEndpointUsage(uint32_t *usage);
 
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
index 6c298f9..92bf81b 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
@@ -318,11 +318,21 @@
 status_t Camera3ZslStream::clearInputRingBuffer() {
     Mutex::Autolock l(mLock);
 
+    return clearInputRingBufferLocked();
+}
+
+status_t Camera3ZslStream::clearInputRingBufferLocked() {
     mInputBufferQueue.clear();
 
     return mProducer->clear();
 }
 
+status_t Camera3ZslStream::disconnectLocked() {
+    clearInputRingBufferLocked();
+
+    return Camera3OutputStream::disconnectLocked();
+}
+
 status_t Camera3ZslStream::setTransform(int /*transform*/) {
     ALOGV("%s: Not implemented", __FUNCTION__);
     return INVALID_OPERATION;
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h
index 6721832..d89c38d 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.h
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.h
@@ -96,6 +96,12 @@
             bool output,
             /*out*/
             sp<Fence> *releaseFenceOut);
+
+    // Disconnect the Camera3ZslStream specific bufferQueues.
+    virtual status_t disconnectLocked();
+
+    status_t clearInputRingBufferLocked();
+
 }; // class Camera3ZslStream
 
 }; // namespace camera3