Merge "AAC decoder: add support for controlling presentation parameters" into lmp-dev
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index bfbc6bf..27df9cd 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -104,6 +104,10 @@
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
 
+    virtual status_t unprovisionDevice() {
+        return android::ERROR_DRM_CANNOT_HANDLE;
+    }
+
     virtual status_t getSecureStops(List<Vector<uint8_t> >& secureStops) {
         UNUSED(secureStops);
         return android::ERROR_DRM_CANNOT_HANDLE;
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
index 6efc712..2ea554b 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
@@ -299,6 +299,12 @@
         return OK;
     }
 
+    status_t MockDrmPlugin::unprovisionDevice()
+    {
+        ALOGD("MockDrmPlugin::unprovisionDevice()");
+        return OK;
+    }
+
     status_t MockDrmPlugin::getSecureStops(List<Vector<uint8_t> > &secureStops)
     {
         Mutex::Autolock lock(mLock);
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
index 97d7052..4b63299 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
@@ -85,6 +85,8 @@
                                           Vector<uint8_t> &certificate,
                                           Vector<uint8_t> &wrappedKey);
 
+        status_t unprovisionDevice();
+
         status_t getSecureStops(List<Vector<uint8_t> > &secureStops);
         status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
 
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index cf34991..dd63a23 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -149,6 +149,11 @@
     static void acquireAudioSessionId(int audioSession, pid_t pid);
     static void releaseAudioSessionId(int audioSession, pid_t pid);
 
+    // Get the HW synchronization source used for an audio session.
+    // Return a valid source or AUDIO_HW_SYNC_INVALID if an error occurs
+    // or no HW sync source is used.
+    static audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId);
+
     // types of io configuration change events received with ioConfigChanged()
     enum io_config_event {
         OUTPUT_OPENED,
@@ -309,6 +314,12 @@
     /* Set audio port configuration */
     static status_t setAudioPortConfig(const struct audio_port_config *config);
 
+
+    static status_t acquireSoundTriggerSession(audio_session_t *session,
+                                           audio_io_handle_t *ioHandle,
+                                           audio_devices_t *device);
+    static status_t releaseSoundTriggerSession(audio_session_t session);
+
     // ----------------------------------------------------------------------------
 
     class AudioPortCallback : public RefBase
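
A hypothetical helper (the function name is made up) illustrating the intended call pattern for the two AudioSystem additions above:

    #include <media/AudioSystem.h>
    #include <system/audio.h>

    using namespace android;

    static void useNewAudioSystemCalls(audio_session_t playbackSession) {
        // HW A/V sync source associated with an existing (e.g. tunneled) session;
        // AUDIO_HW_SYNC_INVALID means no HW sync is in use or AudioFlinger is gone.
        audio_hw_sync_t sync = AudioSystem::getAudioHwSyncForSession(playbackSession);
        if (sync != AUDIO_HW_SYNC_INVALID) {
            // pass 'sync' down to the decoder/renderer for HW A/V sync
        }

        // Capture session/IO handle/device tuple reserved for sound trigger use.
        audio_session_t session;
        audio_io_handle_t ioHandle;
        audio_devices_t device;
        if (AudioSystem::acquireSoundTriggerSession(&session, &ioHandle, &device) == NO_ERROR) {
            // ... run sound trigger capture on (session, ioHandle, device) ...
            AudioSystem::releaseSoundTriggerSession(session);
        }
    }
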
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 82ec09c..31a14f0 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -235,6 +235,8 @@
     /* Set audio port configuration */
     virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
 
+    /* Get the HW synchronization source used for an audio session */
+    virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) = 0;
 };
 
 
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index abbda32..c251439 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -136,6 +136,12 @@
     virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
 
     virtual void registerClient(const sp<IAudioPolicyServiceClient>& client) = 0;
+
+    virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+                                           audio_io_handle_t *ioHandle,
+                                           audio_devices_t *device) = 0;
+
+    virtual status_t releaseSoundTriggerSession(audio_session_t session) = 0;
 };
 
 
diff --git a/include/media/IDrm.h b/include/media/IDrm.h
index 32ae28e..68de87a 100644
--- a/include/media/IDrm.h
+++ b/include/media/IDrm.h
@@ -70,6 +70,8 @@
                                               Vector<uint8_t> &certificate,
                                               Vector<uint8_t> &wrappedKey) = 0;
 
+    virtual status_t unprovisionDevice() = 0;
+
     virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops) = 0;
 
     virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease) = 0;
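
For context, a sketch of how a client might reach the new call; the helper is illustrative, and per Drm::unprovisionDevice further down the caller must hold android.permission.REMOVE_DRM_CERTIFICATES:

    #include <binder/IServiceManager.h>
    #include <utils/String16.h>
    #include <media/IMediaPlayerService.h>
    #include <media/IDrm.h>

    using namespace android;

    // Illustrative helper: create a plugin for the given DRM scheme and ask
    // it to drop its provisioning certificate.
    static status_t unprovisionScheme(const uint8_t uuid[16]) {
        sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(
                defaultServiceManager()->getService(String16("media.player")));
        if (service == NULL) {
            return NO_INIT;
        }
        sp<IDrm> drm = service->makeDrm();
        if (drm == NULL || drm->createPlugin(uuid) != OK) {
            return NO_INIT;
        }
        status_t err = drm->unprovisionDevice();
        drm->destroyPlugin();
        return err;
    }
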
diff --git a/include/media/IMediaCodecList.h b/include/media/IMediaCodecList.h
new file mode 100644
index 0000000..e93ea8b
--- /dev/null
+++ b/include/media/IMediaCodecList.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IMEDIACODECLIST_H
+#define ANDROID_IMEDIACODECLIST_H
+
+#include <utils/Errors.h>  // for status_t
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+struct MediaCodecInfo;
+
+class IMediaCodecList: public IInterface
+{
+public:
+    DECLARE_META_INTERFACE(MediaCodecList);
+
+    virtual size_t countCodecs() const = 0;
+    virtual sp<MediaCodecInfo> getCodecInfo(size_t index) const = 0;
+
+    virtual ssize_t findCodecByType(
+            const char *type, bool encoder, size_t startIndex = 0) const = 0;
+
+    virtual ssize_t findCodecByName(const char *name) const = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnMediaCodecList: public BnInterface<IMediaCodecList>
+{
+public:
+    virtual status_t    onTransact( uint32_t code,
+                                    const Parcel& data,
+                                    Parcel* reply,
+                                    uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IMEDIACODECLIST_H
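
A sketch of a client walking the codec list through this interface; it assumes the MediaCodecList::getInstance() changes further down, which are expected to hand back the Binder proxy (via IMediaPlayerService::getCodecList()) in client processes:

    #define LOG_TAG "CodecListDump"
    #include <utils/Log.h>
    #include <media/IMediaCodecList.h>
    #include <media/MediaCodecInfo.h>
    #include <media/stagefright/MediaCodecList.h>

    using namespace android;

    // Illustrative helper: log every codec known to the (possibly remote) list.
    static void dumpCodecNames() {
        sp<IMediaCodecList> list = MediaCodecList::getInstance();
        if (list == NULL) {
            return;
        }
        for (size_t i = 0; i < list->countCodecs(); ++i) {
            const sp<MediaCodecInfo> info = list->getCodecInfo(i);
            if (info != NULL) {
                ALOGI("%s (%s)", info->getCodecName(),
                        info->isEncoder() ? "encoder" : "decoder");
            }
        }
    }
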
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index 5b45376..d7e584a 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -34,6 +34,7 @@
 struct ICrypto;
 struct IDrm;
 struct IHDCP;
+struct IMediaCodecList;
 struct IMediaHTTPService;
 class IMediaRecorder;
 class IOMX;
@@ -65,6 +66,7 @@
     virtual sp<ICrypto>         makeCrypto() = 0;
     virtual sp<IDrm>            makeDrm() = 0;
     virtual sp<IHDCP>           makeHDCP(bool createEncryptionModule) = 0;
+    virtual sp<IMediaCodecList> getCodecList() const = 0;
 
     // Connects to a remote display.
     // 'iface' specifies the address of the local interface on which to listen for
diff --git a/include/media/MediaCodecInfo.h b/include/media/MediaCodecInfo.h
new file mode 100644
index 0000000..29315ce
--- /dev/null
+++ b/include/media/MediaCodecInfo.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2014, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CODEC_INFO_H_
+
+#define MEDIA_CODEC_INFO_H_
+
+#include <binder/Parcel.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AString.h>
+
+#include <sys/types.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/Vector.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+struct AMessage;
+struct Parcel;
+struct CodecCapabilities;
+
+struct MediaCodecInfo : public RefBase {
+    struct ProfileLevel {
+        uint32_t mProfile;
+        uint32_t mLevel;
+    };
+
+    struct Capabilities : public RefBase {
+        void getSupportedProfileLevels(Vector<ProfileLevel> *profileLevels) const;
+        void getSupportedColorFormats(Vector<uint32_t> *colorFormats) const;
+        uint32_t getFlags() const;
+        const sp<AMessage> &getDetails() const;
+
+    private:
+        Vector<ProfileLevel> mProfileLevels;
+        Vector<uint32_t> mColorFormats;
+        uint32_t mFlags;
+        sp<AMessage> mDetails;
+
+        Capabilities();
+
+        // read object from parcel even if object creation fails
+        static sp<Capabilities> FromParcel(const Parcel &parcel);
+        status_t writeToParcel(Parcel *parcel) const;
+
+        DISALLOW_EVIL_CONSTRUCTORS(Capabilities);
+
+        friend class MediaCodecInfo;
+    };
+
+    bool isEncoder() const;
+    bool hasQuirk(const char *name) const;
+    void getSupportedMimes(Vector<AString> *mimes) const;
+    const sp<Capabilities> &getCapabilitiesFor(const char *mime) const;
+    const char *getCodecName() const;
+
+    /**
+     * Serialization over Binder
+     */
+    static sp<MediaCodecInfo> FromParcel(const Parcel &parcel);
+    status_t writeToParcel(Parcel *parcel) const;
+
+private:
+    // variables set only in the constructor - these are accessed by
+    // MediaCodecList to avoid duplicating the same variables
+    AString mName;
+    bool mIsEncoder;
+    bool mHasSoleMime; // was initialized with mime
+
+    Vector<AString> mQuirks;
+    KeyedVector<AString, sp<Capabilities> > mCaps;
+
+    sp<Capabilities> mCurrentCaps; // currently initialized capabilities
+
+    ssize_t getCapabilityIndex(const char *mime) const;
+
+    /* Methods used by MediaCodecList to construct the info
+     * object from XML.
+     *
+     * After info object is created:
+     * - additional quirks can be added
+     * - additional mimes can be added
+     *   - OMX codec capabilities can be set for the current mime-type
+     *   - a capability detail can be set for the current mime-type
+     *   - a feature can be set for the current mime-type
+     *   - info object can be completed when parsing of a mime-type is done
+     */
+    MediaCodecInfo(AString name, bool encoder, const char *mime);
+    void addQuirk(const char *name);
+    status_t addMime(const char *mime);
+    status_t initializeCapabilities(const CodecCapabilities &caps);
+    void addDetail(const AString &key, const AString &value);
+    void addFeature(const AString &key, int32_t value);
+    void complete();
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaCodecInfo);
+
+    friend class MediaCodecList;
+};
+
+}  // namespace android
+
+#endif  // MEDIA_CODEC_INFO_H_
+
+
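
As a usage illustration, a hypothetical profile check built on the public accessors above (the helper itself is not part of this change):

    #include <media/MediaCodecInfo.h>

    using namespace android;

    // Returns true if the codec advertises the given profile for 'mime'.
    static bool supportsProfile(
            const sp<MediaCodecInfo> &info, const char *mime, uint32_t profile) {
        const sp<MediaCodecInfo::Capabilities> caps = info->getCapabilitiesFor(mime);
        if (caps == NULL) {
            return false;  // this codec does not handle 'mime'
        }
        Vector<MediaCodecInfo::ProfileLevel> profileLevels;
        caps->getSupportedProfileLevels(&profileLevels);
        for (size_t i = 0; i < profileLevels.size(); ++i) {
            if (profileLevels.itemAt(i).mProfile == profile) {
                return true;
            }
        }
        return false;
    }
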
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index ca5076d..3a6bb9e 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -76,6 +76,10 @@
         DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
     };
 
+    static bool isFlexibleColorFormat(
+        const sp<IOMX> &omx, IOMX::node_id node,
+        uint32_t colorFormat, OMX_U32 *flexibleEquivalent);
+
 protected:
     virtual ~ACodec();
 
@@ -316,7 +320,10 @@
             OMX_ERRORTYPE error = OMX_ErrorUndefined,
             status_t internalError = UNKNOWN_ERROR);
 
-    static void describeDefaultColorFormat(DescribeColorFormatParams &describeParams);
+    static bool describeDefaultColorFormat(DescribeColorFormatParams &describeParams);
+    static bool describeColorFormat(
+        const sp<IOMX> &omx, IOMX::node_id node,
+        DescribeColorFormatParams &describeParams);
 
     status_t requestIDRFrame();
     status_t setParameters(const sp<AMessage> &params);
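
A sketch of how the newly public isFlexibleColorFormat() is meant to be used, assuming omx and node refer to an already-allocated component:

    #include <media/IOMX.h>
    #include <media/stagefright/ACodec.h>

    using namespace android;

    // If the component's color format has a flexible (YUV420Flexible-style)
    // equivalent, report that to the application instead of the vendor format.
    static uint32_t reportedColorFormat(
            const sp<IOMX> &omx, IOMX::node_id node, uint32_t vendorFormat) {
        OMX_U32 flexibleEquivalent;
        if (ACodec::isFlexibleColorFormat(
                omx, node, vendorFormat, &flexibleEquivalent)) {
            return flexibleEquivalent;
        }
        return vendorFormat;
    }
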
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index f8787dd..3fb9e36 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -31,6 +31,7 @@
 namespace android {
 
 struct AMessage;
+struct AString;
 struct IMediaHTTPService;
 class String8;
 
@@ -46,7 +47,8 @@
     static sp<DataSource> CreateFromURI(
             const sp<IMediaHTTPService> &httpService,
             const char *uri,
-            const KeyedVector<String8, String8> *headers = NULL);
+            const KeyedVector<String8, String8> *headers = NULL,
+            AString *sniffedMIME = NULL);
 
     DataSource() {}
 
@@ -100,6 +102,10 @@
     virtual ~DataSource() {}
 
 private:
+    enum {
+        kDefaultMetaSize = 200000,
+    };
+
     static Mutex gSnifferMutex;
     static List<SnifferFunc> gSniffers;
     static bool gSniffersRegistered;
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 4ff0d62..b87a09e 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -55,10 +55,10 @@
     struct BatteryNotifier;
 
     static sp<MediaCodec> CreateByType(
-            const sp<ALooper> &looper, const char *mime, bool encoder);
+            const sp<ALooper> &looper, const char *mime, bool encoder, status_t *err = NULL);
 
     static sp<MediaCodec> CreateByComponentName(
-            const sp<ALooper> &looper, const char *name);
+            const sp<ALooper> &looper, const char *name, status_t *err = NULL);
 
     status_t configure(
             const sp<AMessage> &format,
@@ -223,6 +223,7 @@
     AString mComponentName;
     uint32_t mReplyID;
     uint32_t mFlags;
+    status_t mStickyError;
     sp<Surface> mNativeWindow;
     SoftwareRenderer *mSoftRenderer;
     sp<AMessage> mOutputFormat;
@@ -304,6 +305,18 @@
     void updateBatteryStat();
     bool isExecuting() const;
 
+    /* Returns the last codec error recorded while the sticky error flag is set.
+     * If no such error has been recorded, returns UNKNOWN_ERROR.
+     */
+    inline status_t getStickyError() const {
+        return mStickyError != 0 ? mStickyError : UNKNOWN_ERROR;
+    }
+
+    inline void setStickyError(status_t err) {
+        mFlags |= kFlagStickyError;
+        mStickyError = err;
+    }
+
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodec);
 };
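
A hypothetical factory wrapper showing how the new status_t out-parameter lets callers tell a missing component apart from other failures:

    #define LOG_TAG "CodecFactory"
    #include <utils/Log.h>
    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/foundation/ALooper.h>

    using namespace android;

    // Illustrative wrapper around the extended CreateByType() signature.
    static sp<MediaCodec> createDecoder(
            const sp<ALooper> &looper, const char *mime) {
        status_t err = OK;
        sp<MediaCodec> codec = MediaCodec::CreateByType(
                looper, mime, false /* encoder */, &err);
        if (codec == NULL) {
            // err is expected to be NAME_NOT_FOUND when no such decoder exists
            ALOGE("no decoder for %s (err %d)", mime, err);
        }
        return codec;
    }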
 
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index c11fcc9..8605d99 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -20,6 +20,9 @@
 
 #include <media/stagefright/foundation/ABase.h>
 #include <media/stagefright/foundation/AString.h>
+#include <media/IMediaCodecList.h>
+#include <media/IOMX.h>
+#include <media/MediaCodecInfo.h>
 
 #include <sys/types.h>
 #include <utils/Errors.h>
@@ -31,32 +34,22 @@
 
 struct AMessage;
 
-struct MediaCodecList {
-    static const MediaCodecList *getInstance();
+struct MediaCodecList : public BnMediaCodecList {
+    static sp<IMediaCodecList> getInstance();
 
-    ssize_t findCodecByType(
+    virtual ssize_t findCodecByType(
             const char *type, bool encoder, size_t startIndex = 0) const;
 
-    ssize_t findCodecByName(const char *name) const;
+    virtual ssize_t findCodecByName(const char *name) const;
 
-    size_t countCodecs() const;
-    const char *getCodecName(size_t index) const;
-    bool isEncoder(size_t index) const;
-    bool codecHasQuirk(size_t index, const char *quirkName) const;
+    virtual size_t countCodecs() const;
 
-    status_t getSupportedTypes(size_t index, Vector<AString> *types) const;
+    virtual sp<MediaCodecInfo> getCodecInfo(size_t index) const {
+        return mCodecInfos.itemAt(index);
+    }
 
-    struct ProfileLevel {
-        uint32_t mProfile;
-        uint32_t mLevel;
-    };
-    status_t getCodecCapabilities(
-            size_t index, const char *type,
-            Vector<ProfileLevel> *profileLevels,
-            Vector<uint32_t> *colorFormats,
-            uint32_t *flags,
-            // TODO default argument is only for compatibility with existing JNI
-            sp<AMessage> *capabilities = NULL) const;
+    // to be used by MediaPlayerService alone
+    static sp<IMediaCodecList> getLocalInstance();
 
 private:
     enum Section {
@@ -70,17 +63,8 @@
         SECTION_INCLUDE,
     };
 
-    struct CodecInfo {
-        AString mName;
-        bool mIsEncoder;
-        uint32_t mTypes;
-        uint32_t mSoleType;
-        uint32_t mQuirks;
-        KeyedVector<uint32_t, sp<AMessage> > mCaps;
-        sp<AMessage> mCurrentCaps;
-    };
-
-    static MediaCodecList *sCodecList;
+    static sp<IMediaCodecList> sCodecList;
+    static sp<IMediaCodecList> sRemoteList;
 
     status_t mInitCheck;
     Section mCurrentSection;
@@ -88,9 +72,9 @@
     int32_t mDepth;
     AString mHrefBase;
 
-    Vector<CodecInfo> mCodecInfos;
-    KeyedVector<AString, size_t> mCodecQuirks;
-    KeyedVector<AString, size_t> mTypes;
+    Vector<sp<MediaCodecInfo> > mCodecInfos;
+    sp<MediaCodecInfo> mCurrentInfo;
+    sp<IOMX> mOMX;
 
     MediaCodecList();
     ~MediaCodecList();
@@ -117,6 +101,8 @@
     status_t addFeature(const char **attrs);
     void addType(const char *name);
 
+    status_t initializeCapabilities(const char *type);
+
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodecList);
 };
 
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
index 4b18a0b..e1b2830 100644
--- a/include/media/stagefright/MediaCodecSource.h
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -106,7 +106,6 @@
     bool mStarted;
     bool mStopping;
     bool mDoMoreWorkPending;
-    bool mPullerReachedEOS;
     sp<AMessage> mEncoderActivityNotify;
     sp<IGraphicBufferProducer> mGraphicBufferProducer;
     Vector<sp<ABuffer> > mEncoderInputBuffers;
@@ -123,7 +122,7 @@
     Mutex mOutputBufferLock;
     Condition mOutputBufferCond;
     List<MediaBuffer*> mOutputBufferQueue;
-    bool mEncodedReachedEOS;
+    bool mEncoderReachedEOS;
     status_t mErrorCode;
 
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodecSource);
diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h
index 686f286..7540e07 100644
--- a/include/media/stagefright/MediaErrors.h
+++ b/include/media/stagefright/MediaErrors.h
@@ -23,6 +23,18 @@
 namespace android {
 
 enum {
+    // status_t map for errors in the media framework
+    // OK or NO_ERROR or 0 represents no error.
+
+    // See system/core/include/utils/Errors.h
+    // System standard errors from -1 through (possibly) -133
+    //
+    // Errors with special meanings and side effects.
+    // INVALID_OPERATION:  Operation attempted in an illegal state (will try to signal to app).
+    // DEAD_OBJECT:        Signal from CodecBase to MediaCodec that MediaServer has died.
+    // NAME_NOT_FOUND:     Signal from CodecBase to MediaCodec that the component was not found.
+
+    // Media errors
     MEDIA_ERROR_BASE        = -1000,
 
     ERROR_ALREADY_CONNECTED = MEDIA_ERROR_BASE,
@@ -64,8 +76,34 @@
     // Heartbeat Error Codes
     HEARTBEAT_ERROR_BASE = -3000,
     ERROR_HEARTBEAT_TERMINATE_REQUESTED                     = HEARTBEAT_ERROR_BASE,
+
+    // NDK Error codes
+    // frameworks/av/include/ndk/NdkMediaError.h
+    // from -10000 (0xFFFFD8F0 - 0xFFFFD8EC)
+    // from -20000 (0xFFFFB1E0 - 0xFFFFB1D7)
+
+    // Codec errors are permitted from 0x80001000 through 0x9000FFFF
+    ERROR_CODEC_MAX    = (signed)0x9000FFFF,
+    ERROR_CODEC_MIN    = (signed)0x80001000,
+
+    // System unknown errors from 0x80000000 - 0x80000007 (INT32_MIN + 7)
+    // See system/core/include/utils/Errors.h
 };
 
+// action codes for MediaCodecs that tell the upper layer and application
+// the severity of any error.
+enum ActionCode {
+    ACTION_CODE_FATAL,
+    ACTION_CODE_TRANSIENT,
+    ACTION_CODE_RECOVERABLE,
+};
+
+// returns true if err is a recognized DRM error code
+static inline bool isCryptoError(status_t err) {
+    return (ERROR_DRM_RESOURCE_BUSY <= err && err <= ERROR_DRM_UNKNOWN)
+            || (ERROR_DRM_VENDOR_MIN <= err && err <= ERROR_DRM_VENDOR_MAX);
+}
+
 }  // namespace android
 
 #endif  // MEDIA_ERRORS_H_
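
A hypothetical triage routine consuming the new ranges and helpers; the mapping chosen here is illustrative, not the framework's:

    #include <media/stagefright/MediaErrors.h>

    using namespace android;

    // Illustrative mapping from a status_t to an ActionCode.
    static ActionCode actionForError(status_t err) {
        if (isCryptoError(err)) {
            // a DRM failure on one buffer need not kill the codec
            return ACTION_CODE_RECOVERABLE;
        }
        if (err >= ERROR_CODEC_MIN && err <= ERROR_CODEC_MAX) {
            // component-specific error range; treated as transient here
            return ACTION_CODE_TRANSIENT;
        }
        return ACTION_CODE_FATAL;
    }
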
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index 3076a96..183933a 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -65,6 +65,8 @@
     virtual char* getDrmTrackInfo(size_t trackID, int *len) {
         return NULL;
     }
+    virtual void setUID(uid_t uid) {
+    }
 
 protected:
     MediaExtractor() : mIsDrm(false) {}
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 5590b60..e341160 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -28,7 +28,7 @@
 
 namespace android {
 
-struct MediaCodecList;
+struct MediaCodecInfo;
 class MemoryDealer;
 struct OMXCodecObserver;
 struct CodecProfileLevel;
@@ -115,7 +115,7 @@
             Vector<CodecNameAndQuirks> *matchingCodecNamesAndQuirks);
 
     static uint32_t getComponentQuirks(
-            const MediaCodecList *list, size_t index);
+            const sp<MediaCodecInfo> &list);
 
     static bool findCodecQuirks(const char *componentName, uint32_t *quirks);
 
diff --git a/include/media/stagefright/foundation/ABase.h b/include/media/stagefright/foundation/ABase.h
index 949d49e..72e3d87 100644
--- a/include/media/stagefright/foundation/ABase.h
+++ b/include/media/stagefright/foundation/ABase.h
@@ -18,6 +18,8 @@
 
 #define A_BASE_H_
 
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))
+
 #define DISALLOW_EVIL_CONSTRUCTORS(name) \
     name(const name &); \
     name &operator=(const name &)
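
Trivial illustration of the new ARRAY_SIZE helper:

    #include <stddef.h>
    #include <media/stagefright/foundation/ABase.h>

    static const int kSampleRates[] = { 44100, 48000, 96000 };
    static const size_t kNumSampleRates = ARRAY_SIZE(kSampleRates);  // == 3
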
diff --git a/include/media/stagefright/foundation/AString.h b/include/media/stagefright/foundation/AString.h
index 4be3c6d..7c98699 100644
--- a/include/media/stagefright/foundation/AString.h
+++ b/include/media/stagefright/foundation/AString.h
@@ -18,11 +18,13 @@
 
 #define A_STRING_H_
 
+#include <utils/Errors.h>
 #include <sys/types.h>
 
 namespace android {
 
 struct String8;
+struct Parcel;
 
 struct AString {
     AString();
@@ -77,7 +79,9 @@
     bool operator>(const AString &other) const;
 
     int compare(const AString &other) const;
+    int compareIgnoreCase(const AString &other) const;
 
+    bool equalsIgnoreCase(const AString &other) const;
     bool startsWith(const char *prefix) const;
     bool endsWith(const char *suffix) const;
     bool startsWithIgnoreCase(const char *prefix) const;
@@ -85,6 +89,9 @@
 
     void tolower();
 
+    static AString FromParcel(const Parcel &parcel);
+    status_t writeToParcel(Parcel *parcel) const;
+
 private:
     static const char *kEmptyString;
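
An illustrative round trip through the new Parcel helpers, plus the case-insensitive comparison:

    #include <binder/Parcel.h>
    #include <media/stagefright/foundation/AString.h>

    using namespace android;

    // Write an AString into a Parcel, read it back, and compare ignoring case.
    static bool parcelRoundTripMatches(const AString &mime) {
        Parcel parcel;
        mime.writeToParcel(&parcel);
        parcel.setDataPosition(0);
        AString copy = AString::FromParcel(parcel);
        return copy.equalsIgnoreCase(mime);
    }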
 
diff --git a/include/soundtrigger/ISoundTriggerClient.h b/include/soundtrigger/ISoundTriggerClient.h
index 7f86d02..480429a 100644
--- a/include/soundtrigger/ISoundTriggerClient.h
+++ b/include/soundtrigger/ISoundTriggerClient.h
@@ -31,6 +31,10 @@
 
     virtual void onRecognitionEvent(const sp<IMemory>& eventMemory) = 0;
 
+    virtual void onSoundModelEvent(const sp<IMemory>& eventMemory) = 0;
+
+    virtual void onServiceStateChange(const sp<IMemory>& eventMemory) = 0;
+
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/soundtrigger/ISoundTriggerHwService.h b/include/soundtrigger/ISoundTriggerHwService.h
index 05a764a..ae0cb01 100644
--- a/include/soundtrigger/ISoundTriggerHwService.h
+++ b/include/soundtrigger/ISoundTriggerHwService.h
@@ -39,6 +39,8 @@
     virtual status_t attach(const sound_trigger_module_handle_t handle,
                                       const sp<ISoundTriggerClient>& client,
                                       sp<ISoundTrigger>& module) = 0;
+
+    virtual status_t setCaptureState(bool active) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/soundtrigger/SoundTrigger.h b/include/soundtrigger/SoundTrigger.h
index 1f7f286..bf5e1de 100644
--- a/include/soundtrigger/SoundTrigger.h
+++ b/include/soundtrigger/SoundTrigger.h
@@ -18,6 +18,7 @@
 #define ANDROID_HARDWARE_SOUNDTRIGGER_H
 
 #include <binder/IBinder.h>
+#include <utils/threads.h>
 #include <soundtrigger/SoundTriggerCallback.h>
 #include <soundtrigger/ISoundTrigger.h>
 #include <soundtrigger/ISoundTriggerHwService.h>
@@ -32,12 +33,15 @@
                         public IBinder::DeathRecipient
 {
 public:
+
+    virtual ~SoundTrigger();
+
     static  status_t listModules(struct sound_trigger_module_descriptor *modules,
                                  uint32_t *numModules);
     static  sp<SoundTrigger> attach(const sound_trigger_module_handle_t module,
                                        const sp<SoundTriggerCallback>& callback);
 
-            virtual ~SoundTrigger();
+    static  status_t setCaptureState(bool active);
 
             void detach();
 
@@ -51,6 +55,8 @@
 
             // BpSoundTriggerClient
             virtual void onRecognitionEvent(const sp<IMemory>& eventMemory);
+            virtual void onSoundModelEvent(const sp<IMemory>& eventMemory);
+            virtual void onServiceStateChange(const sp<IMemory>& eventMemory);
 
             //IBinder::DeathRecipient
             virtual void binderDied(const wp<IBinder>& who);
diff --git a/include/soundtrigger/SoundTriggerCallback.h b/include/soundtrigger/SoundTriggerCallback.h
index 8a5ba02..b5277f2 100644
--- a/include/soundtrigger/SoundTriggerCallback.h
+++ b/include/soundtrigger/SoundTriggerCallback.h
@@ -31,6 +31,10 @@
 
     virtual void onRecognitionEvent(struct sound_trigger_recognition_event *event) = 0;
 
+    virtual void onSoundModelEvent(struct sound_trigger_model_event *event) = 0;
+
+    virtual void onServiceStateChange(sound_trigger_service_state_t state) = 0;
+
     virtual void onServiceDied() = 0;
 
 };
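
For illustration, a client-side callback implementing the two new notifications (the listener class is hypothetical):

    #include <soundtrigger/SoundTriggerCallback.h>

    using namespace android;

    // Hypothetical listener showing the expanded callback surface.
    struct MyTriggerListener : public SoundTriggerCallback {
        virtual void onRecognitionEvent(
                struct sound_trigger_recognition_event *event __unused) {
            // existing callback: a keyphrase or sound was recognized
        }
        virtual void onSoundModelEvent(
                struct sound_trigger_model_event *event __unused) {
            // new: the HAL updated state for a loaded sound model
        }
        virtual void onServiceStateChange(sound_trigger_service_state_t state __unused) {
            // new: global service state, e.g. recognition disabled while
            // audio capture is active (see setCaptureState() above)
        }
        virtual void onServiceDied() {
            // detach and re-attach to the module
        }
    };
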
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index cee26d9..3be0651 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -25,6 +25,7 @@
     AudioRecord.cpp \
     AudioSystem.cpp \
     mediaplayer.cpp \
+    IMediaCodecList.cpp \
     IMediaHTTPConnection.cpp \
     IMediaHTTPService.cpp \
     IMediaLogService.cpp \
@@ -36,6 +37,7 @@
     IRemoteDisplay.cpp \
     IRemoteDisplayClient.cpp \
     IStreamSource.cpp \
+    MediaCodecInfo.cpp \
     Metadata.cpp \
     mediarecorder.cpp \
     IMediaMetadataRetriever.cpp \
@@ -74,6 +76,7 @@
 
 LOCAL_C_INCLUDES := \
     $(TOP)/frameworks/native/include/media/openmax \
+    $(TOP)/frameworks/av/media/libstagefright \
     external/icu/icu4c/source/common \
     external/icu/icu4c/source/i18n \
     $(call include-path-for, audio-effects) \
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 365a594..3486d21 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -450,6 +450,13 @@
     }
 }
 
+audio_hw_sync_t AudioSystem::getAudioHwSyncForSession(audio_session_t sessionId)
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return AUDIO_HW_SYNC_INVALID;
+    return af->getAudioHwSyncForSession(sessionId);
+}
+
 // ---------------------------------------------------------------------------
 
 void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who __unused)
@@ -913,6 +920,21 @@
     gAudioPortCallback = callBack;
 }
 
+status_t AudioSystem::acquireSoundTriggerSession(audio_session_t *session,
+                                       audio_io_handle_t *ioHandle,
+                                       audio_devices_t *device)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->acquireSoundTriggerSession(session, ioHandle, device);
+}
+
+status_t AudioSystem::releaseSoundTriggerSession(audio_session_t session)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->releaseSoundTriggerSession(session);
+}
 // ---------------------------------------------------------------------------
 
 void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused)
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 5331fce..346a192 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -79,7 +79,8 @@
     CREATE_AUDIO_PATCH,
     RELEASE_AUDIO_PATCH,
     LIST_AUDIO_PATCHES,
-    SET_AUDIO_PORT_CONFIG
+    SET_AUDIO_PORT_CONFIG,
+    GET_AUDIO_HW_SYNC
 };
 
 class BpAudioFlinger : public BpInterface<IAudioFlinger>
@@ -883,6 +884,17 @@
         }
         return status;
     }
+    virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+        data.writeInt32(sessionId);
+        status_t status = remote()->transact(GET_AUDIO_HW_SYNC, data, &reply);
+        if (status != NO_ERROR) {
+            return AUDIO_HW_SYNC_INVALID;
+        }
+        return (audio_hw_sync_t)reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
@@ -1345,6 +1357,11 @@
             reply->writeInt32(status);
             return NO_ERROR;
         } break;
+        case GET_AUDIO_HW_SYNC: {
+            CHECK_INTERFACE(IAudioFlinger, data, reply);
+            reply->writeInt32(getAudioHwSyncForSession((audio_session_t)data.readInt32()));
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 1593b17..b57f747 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -65,7 +65,9 @@
     LIST_AUDIO_PATCHES,
     SET_AUDIO_PORT_CONFIG,
     REGISTER_CLIENT,
-    GET_OUTPUT_FOR_ATTR
+    GET_OUTPUT_FOR_ATTR,
+    ACQUIRE_SOUNDTRIGGER_SESSION,
+    RELEASE_SOUNDTRIGGER_SESSION
 };
 
 class BpAudioPolicyService : public BpInterface<IAudioPolicyService>
@@ -563,6 +565,7 @@
         }
         return status;
     }
+
     virtual void registerClient(const sp<IAudioPolicyServiceClient>& client)
     {
         Parcel data, reply;
@@ -570,6 +573,40 @@
         data.writeStrongBinder(client->asBinder());
         remote()->transact(REGISTER_CLIENT, data, &reply);
     }
+
+    virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+                                            audio_io_handle_t *ioHandle,
+                                            audio_devices_t *device)
+    {
+        if (session == NULL || ioHandle == NULL || device == NULL) {
+            return BAD_VALUE;
+        }
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        status_t status = remote()->transact(ACQUIRE_SOUNDTRIGGER_SESSION, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = (status_t)reply.readInt32();
+        if (status == NO_ERROR) {
+            *session = (audio_session_t)reply.readInt32();
+            *ioHandle = (audio_io_handle_t)reply.readInt32();
+            *device = (audio_devices_t)reply.readInt32();
+        }
+        return status;
+    }
+
+    virtual status_t releaseSoundTriggerSession(audio_session_t session)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(session);
+        status_t status = remote()->transact(RELEASE_SOUNDTRIGGER_SESSION, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        return (status_t)reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -984,6 +1021,7 @@
             reply->writeInt32(status);
             return NO_ERROR;
         }
+
         case REGISTER_CLIENT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             sp<IAudioPolicyServiceClient> client = interface_cast<IAudioPolicyServiceClient>(
@@ -992,6 +1030,33 @@
             return NO_ERROR;
         } break;
 
+        case ACQUIRE_SOUNDTRIGGER_SESSION: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            sp<IAudioPolicyServiceClient> client = interface_cast<IAudioPolicyServiceClient>(
+                    data.readStrongBinder());
+            audio_session_t session;
+            audio_io_handle_t ioHandle;
+            audio_devices_t device;
+            status_t status = acquireSoundTriggerSession(&session, &ioHandle, &device);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->writeInt32(session);
+                reply->writeInt32(ioHandle);
+                reply->writeInt32(device);
+            }
+            return NO_ERROR;
+        } break;
+
+        case RELEASE_SOUNDTRIGGER_SESSION: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            sp<IAudioPolicyServiceClient> client = interface_cast<IAudioPolicyServiceClient>(
+                    data.readStrongBinder());
+            audio_session_t session = (audio_session_t)data.readInt32();
+            status_t status = releaseSoundTriggerSession(session);
+            reply->writeInt32(status);
+            return NO_ERROR;
+        } break;
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IDrm.cpp b/media/libmedia/IDrm.cpp
index f1a6a9f..1904839 100644
--- a/media/libmedia/IDrm.cpp
+++ b/media/libmedia/IDrm.cpp
@@ -53,7 +53,8 @@
     SIGN,
     SIGN_RSA,
     VERIFY,
-    SET_LISTENER
+    SET_LISTENER,
+    UNPROVISION_DEVICE
 };
 
 struct BpDrm : public BpInterface<IDrm> {
@@ -229,6 +230,15 @@
         return reply.readInt32();
     }
 
+    virtual status_t unprovisionDevice() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
+
+        remote()->transact(UNPROVISION_DEVICE, data, &reply);
+
+        return reply.readInt32();
+    }
+
     virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops) {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
@@ -619,6 +629,14 @@
             return OK;
         }
 
+        case UNPROVISION_DEVICE:
+        {
+            CHECK_INTERFACE(IDrm, data, reply);
+            status_t result = unprovisionDevice();
+            reply->writeInt32(result);
+            return OK;
+        }
+
         case GET_SECURE_STOPS:
         {
             CHECK_INTERFACE(IDrm, data, reply);
diff --git a/media/libmedia/IMediaCodecList.cpp b/media/libmedia/IMediaCodecList.cpp
new file mode 100644
index 0000000..bf7c5ca
--- /dev/null
+++ b/media/libmedia/IMediaCodecList.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2014, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+#include <media/stagefright/MediaCodecList.h>
+#include <media/IMediaCodecList.h>
+#include <media/MediaCodecInfo.h>
+
+#include <utils/Errors.h>  // for status_t
+
+namespace android {
+
+enum {
+    CREATE = IBinder::FIRST_CALL_TRANSACTION,
+    COUNT_CODECS,
+    GET_CODEC_INFO,
+    FIND_CODEC_BY_TYPE,
+    FIND_CODEC_BY_NAME,
+};
+
+class BpMediaCodecList: public BpInterface<IMediaCodecList>
+{
+public:
+    BpMediaCodecList(const sp<IBinder>& impl)
+        : BpInterface<IMediaCodecList>(impl)
+    {
+    }
+
+    virtual size_t countCodecs() const
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaCodecList::getInterfaceDescriptor());
+        remote()->transact(COUNT_CODECS, data, &reply);
+        return static_cast<size_t>(reply.readInt32());
+    }
+
+    virtual sp<MediaCodecInfo> getCodecInfo(size_t index) const
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaCodecList::getInterfaceDescriptor());
+        data.writeInt32(index);
+        remote()->transact(GET_CODEC_INFO, data, &reply);
+        status_t err = reply.readInt32();
+        if (err == OK) {
+            return MediaCodecInfo::FromParcel(reply);
+        } else {
+            return NULL;
+        }
+    }
+
+    virtual ssize_t findCodecByType(
+            const char *type, bool encoder, size_t startIndex = 0) const
+    {
+        if (startIndex > INT32_MAX) {
+            return NAME_NOT_FOUND;
+        }
+
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaCodecList::getInterfaceDescriptor());
+        data.writeCString(type);
+        data.writeInt32(encoder);
+        data.writeInt32(startIndex);
+        remote()->transact(FIND_CODEC_BY_TYPE, data, &reply);
+        return static_cast<ssize_t>(reply.readInt32());
+    }
+
+    virtual ssize_t findCodecByName(const char *name) const
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaCodecList::getInterfaceDescriptor());
+        data.writeCString(name);
+        remote()->transact(FIND_CODEC_BY_NAME, data, &reply);
+        return static_cast<ssize_t>(reply.readInt32());
+    }
+};
+
+IMPLEMENT_META_INTERFACE(MediaCodecList, "android.media.IMediaCodecList");
+
+// ----------------------------------------------------------------------
+
+status_t BnMediaCodecList::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch (code) {
+        case COUNT_CODECS:
+        {
+            CHECK_INTERFACE(IMediaCodecList, data, reply);
+            size_t count = countCodecs();
+            if (count > INT32_MAX) {
+                count = INT32_MAX;
+            }
+            reply->writeInt32(count);
+            return NO_ERROR;
+        }
+        break;
+
+        case GET_CODEC_INFO:
+        {
+            CHECK_INTERFACE(IMediaCodecList, data, reply);
+            size_t index = static_cast<size_t>(data.readInt32());
+            const sp<MediaCodecInfo> info = getCodecInfo(index);
+            if (info != NULL) {
+                reply->writeInt32(OK);
+                info->writeToParcel(reply);
+            } else {
+                reply->writeInt32(-ERANGE);
+            }
+            return NO_ERROR;
+        }
+        break;
+
+        case FIND_CODEC_BY_TYPE:
+        {
+            CHECK_INTERFACE(IMediaCodecList, data, reply);
+            const char *type = data.readCString();
+            bool isEncoder = static_cast<bool>(data.readInt32());
+            size_t startIndex = static_cast<size_t>(data.readInt32());
+            ssize_t index = findCodecByType(type, isEncoder, startIndex);
+            if (index > INT32_MAX || index < 0) {
+                index = NAME_NOT_FOUND;
+            }
+            reply->writeInt32(index);
+            return NO_ERROR;
+        }
+        break;
+
+        case FIND_CODEC_BY_NAME:
+        {
+            CHECK_INTERFACE(IMediaCodecList, data, reply);
+            const char *name = data.readCString();
+            ssize_t index = findCodecByName(name);
+            if (index > INT32_MAX || index < 0) {
+                index = NAME_NOT_FOUND;
+            }
+            reply->writeInt32(index);
+            return NO_ERROR;
+        }
+        break;
+
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index d116b14..2e02d17 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -23,6 +23,7 @@
 #include <media/ICrypto.h>
 #include <media/IDrm.h>
 #include <media/IHDCP.h>
+#include <media/IMediaCodecList.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IMediaPlayerService.h>
 #include <media/IMediaRecorder.h>
@@ -49,6 +50,7 @@
     ADD_BATTERY_DATA,
     PULL_BATTERY_DATA,
     LISTEN_FOR_REMOTE_DISPLAY,
+    GET_CODEC_LIST,
 };
 
 class BpMediaPlayerService: public BpInterface<IMediaPlayerService>
@@ -191,6 +193,13 @@
         remote()->transact(LISTEN_FOR_REMOTE_DISPLAY, data, &reply);
         return interface_cast<IRemoteDisplay>(reply.readStrongBinder());
     }
+
+    virtual sp<IMediaCodecList> getCodecList() const {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
+        remote()->transact(GET_CODEC_LIST, data, &reply);
+        return interface_cast<IMediaCodecList>(reply.readStrongBinder());
+    }
 };
 
 IMPLEMENT_META_INTERFACE(MediaPlayerService, "android.media.IMediaPlayerService");
@@ -318,6 +327,12 @@
             reply->writeStrongBinder(display->asBinder());
             return NO_ERROR;
         } break;
+        case GET_CODEC_LIST: {
+            CHECK_INTERFACE(IMediaPlayerService, data, reply);
+            sp<IMediaCodecList> mcl = getCodecList();
+            reply->writeStrongBinder(mcl->asBinder());
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
new file mode 100644
index 0000000..7900eae
--- /dev/null
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2014, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecInfo"
+#include <utils/Log.h>
+
+#include <media/IOMX.h>
+
+#include <media/MediaCodecInfo.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <binder/Parcel.h>
+
+#include <media/stagefright/OMXCodec.h>
+
+namespace android {
+
+void MediaCodecInfo::Capabilities::getSupportedProfileLevels(
+        Vector<ProfileLevel> *profileLevels) const {
+    profileLevels->clear();
+    profileLevels->appendVector(mProfileLevels);
+}
+
+void MediaCodecInfo::Capabilities::getSupportedColorFormats(
+        Vector<uint32_t> *colorFormats) const {
+    colorFormats->clear();
+    colorFormats->appendVector(mColorFormats);
+}
+
+uint32_t MediaCodecInfo::Capabilities::getFlags() const {
+    return mFlags;
+}
+
+const sp<AMessage> &MediaCodecInfo::Capabilities::getDetails() const {
+    return mDetails;
+}
+
+MediaCodecInfo::Capabilities::Capabilities()
+  : mFlags(0) {
+    mDetails = new AMessage;
+}
+
+// static
+sp<MediaCodecInfo::Capabilities> MediaCodecInfo::Capabilities::FromParcel(
+        const Parcel &parcel) {
+    sp<MediaCodecInfo::Capabilities> caps = new Capabilities();
+    size_t size = static_cast<size_t>(parcel.readInt32());
+    for (size_t i = 0; i < size; i++) {
+        ProfileLevel profileLevel;
+        profileLevel.mProfile = static_cast<uint32_t>(parcel.readInt32());
+        profileLevel.mLevel = static_cast<uint32_t>(parcel.readInt32());
+        if (caps != NULL) {
+            caps->mProfileLevels.push_back(profileLevel);
+        }
+    }
+    size = static_cast<size_t>(parcel.readInt32());
+    for (size_t i = 0; i < size; i++) {
+        uint32_t color = static_cast<uint32_t>(parcel.readInt32());
+        if (caps != NULL) {
+            caps->mColorFormats.push_back(color);
+        }
+    }
+    uint32_t flags = static_cast<uint32_t>(parcel.readInt32());
+    sp<AMessage> details = AMessage::FromParcel(parcel);
+    if (caps != NULL) {
+        caps->mFlags = flags;
+        caps->mDetails = details;
+    }
+    return caps;
+}
+
+status_t MediaCodecInfo::Capabilities::writeToParcel(Parcel *parcel) const {
+    CHECK_LE(mProfileLevels.size(), INT32_MAX);
+    parcel->writeInt32(mProfileLevels.size());
+    for (size_t i = 0; i < mProfileLevels.size(); i++) {
+        parcel->writeInt32(mProfileLevels.itemAt(i).mProfile);
+        parcel->writeInt32(mProfileLevels.itemAt(i).mLevel);
+    }
+    CHECK_LE(mColorFormats.size(), INT32_MAX);
+    parcel->writeInt32(mColorFormats.size());
+    for (size_t i = 0; i < mColorFormats.size(); i++) {
+        parcel->writeInt32(mColorFormats.itemAt(i));
+    }
+    parcel->writeInt32(mFlags);
+    mDetails->writeToParcel(parcel);
+    return OK;
+}
+
+bool MediaCodecInfo::isEncoder() const {
+    return mIsEncoder;
+}
+
+bool MediaCodecInfo::hasQuirk(const char *name) const {
+    for (size_t ix = 0; ix < mQuirks.size(); ix++) {
+        if (mQuirks.itemAt(ix).equalsIgnoreCase(name)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+void MediaCodecInfo::getSupportedMimes(Vector<AString> *mimes) const {
+    mimes->clear();
+    for (size_t ix = 0; ix < mCaps.size(); ix++) {
+        mimes->push_back(mCaps.keyAt(ix));
+    }
+}
+
+const sp<MediaCodecInfo::Capabilities> &
+MediaCodecInfo::getCapabilitiesFor(const char *mime) const {
+    ssize_t ix = getCapabilityIndex(mime);
+    if (ix >= 0) {
+        return mCaps.valueAt(ix);
+    }
+    return NULL;
+}
+
+const char *MediaCodecInfo::getCodecName() const {
+    return mName.c_str();
+}
+
+// static
+sp<MediaCodecInfo> MediaCodecInfo::FromParcel(const Parcel &parcel) {
+    AString name = AString::FromParcel(parcel);
+    bool isEncoder = static_cast<bool>(parcel.readInt32());
+    sp<MediaCodecInfo> info = new MediaCodecInfo(name, isEncoder, NULL);
+    size_t size = static_cast<size_t>(parcel.readInt32());
+    for (size_t i = 0; i < size; i++) {
+        AString quirk = AString::FromParcel(parcel);
+        if (info != NULL) {
+            info->mQuirks.push_back(quirk);
+        }
+    }
+    size = static_cast<size_t>(parcel.readInt32());
+    for (size_t i = 0; i < size; i++) {
+        AString mime = AString::FromParcel(parcel);
+        sp<Capabilities> caps = Capabilities::FromParcel(parcel);
+        if (info != NULL) {
+            info->mCaps.add(mime, caps);
+        }
+    }
+    return info;
+}
+
+status_t MediaCodecInfo::writeToParcel(Parcel *parcel) const {
+    mName.writeToParcel(parcel);
+    parcel->writeInt32(mIsEncoder);
+    parcel->writeInt32(mQuirks.size());
+    for (size_t i = 0; i < mQuirks.size(); i++) {
+        mQuirks.itemAt(i).writeToParcel(parcel);
+    }
+    parcel->writeInt32(mCaps.size());
+    for (size_t i = 0; i < mCaps.size(); i++) {
+        mCaps.keyAt(i).writeToParcel(parcel);
+        mCaps.valueAt(i)->writeToParcel(parcel);
+    }
+    return OK;
+}
+
+ssize_t MediaCodecInfo::getCapabilityIndex(const char *mime) const {
+    for (size_t ix = 0; ix < mCaps.size(); ix++) {
+        if (mCaps.keyAt(ix).equalsIgnoreCase(mime)) {
+            return ix;
+        }
+    }
+    return -1;
+}
+
+MediaCodecInfo::MediaCodecInfo(AString name, bool encoder, const char *mime)
+    : mName(name),
+      mIsEncoder(encoder),
+      mHasSoleMime(false) {
+    if (mime != NULL) {
+        addMime(mime);
+        mHasSoleMime = true;
+    }
+}
+
+status_t MediaCodecInfo::addMime(const char *mime) {
+    if (mHasSoleMime) {
+        ALOGE("Codec '%s' already had its type specified", mName.c_str());
+        return -EINVAL;
+    }
+    ssize_t ix = getCapabilityIndex(mime);
+    if (ix >= 0) {
+        mCurrentCaps = mCaps.valueAt(ix);
+    } else {
+        mCurrentCaps = new Capabilities();
+        mCaps.add(AString(mime), mCurrentCaps);
+    }
+    return OK;
+}
+
+status_t MediaCodecInfo::initializeCapabilities(const CodecCapabilities &caps) {
+    mCurrentCaps->mProfileLevels.clear();
+    mCurrentCaps->mColorFormats.clear();
+
+    for (size_t i = 0; i < caps.mProfileLevels.size(); ++i) {
+        const CodecProfileLevel &src = caps.mProfileLevels.itemAt(i);
+
+        ProfileLevel profileLevel;
+        profileLevel.mProfile = src.mProfile;
+        profileLevel.mLevel = src.mLevel;
+        mCurrentCaps->mProfileLevels.push_back(profileLevel);
+    }
+
+    for (size_t i = 0; i < caps.mColorFormats.size(); ++i) {
+        mCurrentCaps->mColorFormats.push_back(caps.mColorFormats.itemAt(i));
+    }
+
+    mCurrentCaps->mFlags = caps.mFlags;
+    mCurrentCaps->mDetails = new AMessage;
+
+    return OK;
+}
+
+void MediaCodecInfo::addQuirk(const char *name) {
+    if (!hasQuirk(name)) {
+        mQuirks.push(name);
+    }
+}
+
+void MediaCodecInfo::complete() {
+    mCurrentCaps = NULL;
+}
+
+void MediaCodecInfo::addDetail(const AString &key, const AString &value) {
+    mCurrentCaps->mDetails->setString(key.c_str(), value.c_str());
+}
+
+void MediaCodecInfo::addFeature(const AString &key, int32_t value) {
+    AString tag = "feature-";
+    tag.append(key);
+    mCurrentCaps->mDetails->setInt32(tag.c_str(), value);
+}
+
+}  // namespace android
diff --git a/media/libmediaplayerservice/Drm.cpp b/media/libmediaplayerservice/Drm.cpp
index d50037f..d222316 100644
--- a/media/libmediaplayerservice/Drm.cpp
+++ b/media/libmediaplayerservice/Drm.cpp
@@ -417,6 +417,23 @@
     return mPlugin->provideProvisionResponse(response, certificate, wrappedKey);
 }
 
+status_t Drm::unprovisionDevice() {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mPlugin == NULL) {
+        return -EINVAL;
+    }
+
+    if (!checkPermission("android.permission.REMOVE_DRM_CERTIFICATES")) {
+        return -EPERM;
+    }
+
+    return mPlugin->unprovisionDevice();
+}
 
 status_t Drm::getSecureStops(List<Vector<uint8_t> > &secureStops) {
     Mutex::Autolock autoLock(mLock);
diff --git a/media/libmediaplayerservice/Drm.h b/media/libmediaplayerservice/Drm.h
index 3d4b0fc..9e23e2e 100644
--- a/media/libmediaplayerservice/Drm.h
+++ b/media/libmediaplayerservice/Drm.h
@@ -75,6 +75,8 @@
                                               Vector<uint8_t> &certificate,
                                               Vector<uint8_t> &wrappedKey);
 
+    virtual status_t unprovisionDevice();
+
     virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops);
 
     virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 735344c..a706987 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -54,6 +54,7 @@
 #include <media/Metadata.h>
 #include <media/AudioTrack.h>
 #include <media/MemoryLeakTrackUtil.h>
+#include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -345,6 +346,10 @@
     return c;
 }
 
+sp<IMediaCodecList> MediaPlayerService::getCodecList() const {
+    return MediaCodecList::getLocalInstance();
+}
+
 sp<IOMX> MediaPlayerService::getOMX() {
     Mutex::Autolock autoLock(mLock);
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 2eca6a0..406e3f6 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -273,6 +273,7 @@
                                        uint32_t *pSampleRate, int* pNumChannels,
                                        audio_format_t* pFormat,
                                        const sp<IMemoryHeap>& heap, size_t *pSize);
+    virtual sp<IMediaCodecList> getCodecList() const;
     virtual sp<IOMX>            getOMX();
     virtual sp<ICrypto>         makeCrypto();
     virtual sp<IDrm>            makeDrm();
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 2f5b0f1..1616448 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -14,10 +14,14 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
+#define LOG_TAG "GenericSource"
+
 #include "GenericSource.h"
 
 #include "AnotherPacketSource.h"
 
+#include <media/IMediaHTTPService.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -34,45 +38,63 @@
 
 NuPlayer::GenericSource::GenericSource(
         const sp<AMessage> &notify,
-        const sp<IMediaHTTPService> &httpService,
-        const char *url,
-        const KeyedVector<String8, String8> *headers,
-        bool isWidevine,
         bool uidValid,
         uid_t uid)
     : Source(notify),
       mFetchSubtitleDataGeneration(0),
+      mFetchTimedTextDataGeneration(0),
       mDurationUs(0ll),
       mAudioIsVorbis(false),
-      mIsWidevine(isWidevine),
+      mIsWidevine(false),
       mUIDValid(uidValid),
       mUID(uid) {
+    resetDataSource();
     DataSource::RegisterDefaultSniffers();
-
-    sp<DataSource> dataSource =
-        DataSource::CreateFromURI(httpService, url, headers);
-    CHECK(dataSource != NULL);
-
-    initFromDataSource(dataSource);
 }
 
-NuPlayer::GenericSource::GenericSource(
-        const sp<AMessage> &notify,
-        int fd, int64_t offset, int64_t length)
-    : Source(notify),
-      mFetchSubtitleDataGeneration(0),
-      mDurationUs(0ll),
-      mAudioIsVorbis(false),
-      mIsWidevine(false) {
-    DataSource::RegisterDefaultSniffers();
-
-    sp<DataSource> dataSource = new FileSource(dup(fd), offset, length);
-
-    initFromDataSource(dataSource);
+void NuPlayer::GenericSource::resetDataSource() {
+    mHTTPService.clear();
+    mUri.clear();
+    mUriHeaders.clear();
+    mFd = -1;
+    mOffset = 0;
+    mLength = 0;
 }
 
-void NuPlayer::GenericSource::initFromDataSource(
-        const sp<DataSource> &dataSource) {
+status_t NuPlayer::GenericSource::setDataSource(
+        const sp<IMediaHTTPService> &httpService,
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
+    resetDataSource();
+
+    mHTTPService = httpService;
+    mUri = url;
+
+    if (headers) {
+        mUriHeaders = *headers;
+    }
+
+    // delay data source creation to prepareAsync() to avoid blocking
+    // the calling thread in setDataSource for any significant time.
+    return OK;
+}
+
+status_t NuPlayer::GenericSource::setDataSource(
+        int fd, int64_t offset, int64_t length) {
+    resetDataSource();
+
+    mFd = dup(fd);
+    mOffset = offset;
+    mLength = length;
+
+    // delay data source creation to prepareAsync() to avoid blocking
+    // the calling thread in setDataSource for any significant time.
+    return OK;
+}
+
+status_t NuPlayer::GenericSource::initFromDataSource(
+        const sp<DataSource> &dataSource,
+        const char* mime) {
     sp<MediaExtractor> extractor;
 
     if (mIsWidevine) {
@@ -86,7 +108,7 @@
                 || strcasecmp(
                     mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
             ALOGE("unsupported widevine mime: %s", mimeType.string());
-            return;
+            return UNKNOWN_ERROR;
         }
 
         sp<WVMExtractor> wvmExtractor = new WVMExtractor(dataSource);
@@ -96,10 +118,12 @@
         }
         extractor = wvmExtractor;
     } else {
-        extractor = MediaExtractor::Create(dataSource);
+        extractor = MediaExtractor::Create(dataSource, mime);
     }
 
-    CHECK(extractor != NULL);
+    if (extractor == NULL) {
+        return UNKNOWN_ERROR;
+    }
 
     sp<MetaData> fileMeta = extractor->getMetaData();
     if (fileMeta != NULL) {
@@ -132,6 +156,16 @@
             if (mVideoTrack.mSource == NULL) {
                 mVideoTrack.mIndex = i;
                 mVideoTrack.mSource = track;
+
+                // check if the source requires secure buffers
+                int32_t secure;
+                if (meta->findInt32(kKeyRequiresSecureBuffers, &secure)
+                        && secure) {
+                    mIsWidevine = true;
+                    if (mUIDValid) {
+                        extractor->setUID(mUID);
+                    }
+                }
             }
         }
 
@@ -145,9 +179,12 @@
             }
         }
     }
+
+    return OK;
 }
 
-status_t NuPlayer::GenericSource::setBuffers(bool audio, Vector<MediaBuffer *> &buffers) {
+status_t NuPlayer::GenericSource::setBuffers(
+        bool audio, Vector<MediaBuffer *> &buffers) {
     if (mIsWidevine && !audio) {
         return mVideoTrack.mSource->setBuffers(buffers);
     }
@@ -158,6 +195,38 @@
 }
 
 void NuPlayer::GenericSource::prepareAsync() {
+    // delayed data source creation
+    AString sniffedMIME;
+    sp<DataSource> dataSource;
+
+    if (!mUri.empty()) {
+        mIsWidevine = !strncasecmp(mUri.c_str(), "widevine://", 11);
+
+        dataSource = DataSource::CreateFromURI(
+               mHTTPService, mUri.c_str(), &mUriHeaders, &sniffedMIME);
+    } else {
+        // Set to false first; if the extractor comes back as
+        // secure, it is flipped to true in initFromDataSource().
+        mIsWidevine = false;
+
+        dataSource = new FileSource(mFd, mOffset, mLength);
+    }
+
+    if (dataSource == NULL) {
+        ALOGE("Failed to create data source!");
+        notifyPrepared(UNKNOWN_ERROR);
+        return;
+    }
+
+    status_t err = initFromDataSource(
+            dataSource, sniffedMIME.empty() ? NULL : sniffedMIME.c_str());
+
+    if (err != OK) {
+        ALOGE("Failed to init from data source!");
+        notifyPrepared(err);
+        return;
+    }
+
     if (mVideoTrack.mSource != NULL) {
         sp<MetaData> meta = mVideoTrack.mSource->getFormat();
 
@@ -206,66 +275,29 @@
     switch (msg->what()) {
       case kWhatFetchSubtitleData:
       {
-          int32_t generation;
-          CHECK(msg->findInt32("generation", &generation));
-          if (generation != mFetchSubtitleDataGeneration) {
-              // stale
-              break;
-          }
+          fetchTextData(kWhatSendSubtitleData, MEDIA_TRACK_TYPE_SUBTITLE,
+                  mFetchSubtitleDataGeneration, mSubtitleTrack.mPackets, msg);
+          break;
+      }
 
-          int32_t avail;
-          if (mSubtitleTrack.mPackets->hasBufferAvailable(&avail)) {
-              break;
-          }
-
-          int64_t timeUs;
-          CHECK(msg->findInt64("timeUs", &timeUs));
-
-          int64_t subTimeUs;
-          readBuffer(MEDIA_TRACK_TYPE_SUBTITLE, timeUs, &subTimeUs);
-
-          const int64_t oneSecUs = 1000000ll;
-          const int64_t delayUs = subTimeUs - timeUs - oneSecUs;
-          sp<AMessage> msg2 = new AMessage(kWhatSendSubtitleData, id());
-          msg2->setInt32("generation", generation);
-          msg2->post(delayUs < 0 ? 0 : delayUs);
-          ALOGV("kWhatFetchSubtitleData generation %d, delayUs %lld",
-                  mFetchSubtitleDataGeneration, delayUs);
-
+      case kWhatFetchTimedTextData:
+      {
+          fetchTextData(kWhatSendTimedTextData, MEDIA_TRACK_TYPE_TIMEDTEXT,
+                  mFetchTimedTextDataGeneration, mTimedTextTrack.mPackets, msg);
           break;
       }
 
       case kWhatSendSubtitleData:
       {
-          int32_t generation;
-          CHECK(msg->findInt32("generation", &generation));
-          if (generation != mFetchSubtitleDataGeneration) {
-              // stale
-              break;
-          }
+          sendTextData(kWhatSubtitleData, MEDIA_TRACK_TYPE_SUBTITLE,
+                  mFetchSubtitleDataGeneration, mSubtitleTrack.mPackets, msg);
+          break;
+      }
 
-          int64_t subTimeUs;
-          if (mSubtitleTrack.mPackets->nextBufferTime(&subTimeUs) != OK) {
-              break;
-          }
-
-          int64_t nextSubTimeUs;
-          readBuffer(MEDIA_TRACK_TYPE_SUBTITLE, -1, &nextSubTimeUs);
-
-          sp<ABuffer> buffer;
-          status_t dequeueStatus = mSubtitleTrack.mPackets->dequeueAccessUnit(&buffer);
-          if (dequeueStatus != OK) {
-              ALOGE("kWhatSendSubtitleData dequeueAccessUnit: %d", dequeueStatus);
-          } else {
-              sp<AMessage> notify = dupNotify();
-              notify->setInt32("what", kWhatSubtitleData);
-              notify->setBuffer("buffer", buffer);
-              notify->post();
-
-              const int64_t delayUs = nextSubTimeUs - subTimeUs;
-              msg->post(delayUs < 0 ? 0 : delayUs);
-          }
-
+      case kWhatSendTimedTextData:
+      {
+          sendTextData(kWhatTimedTextData, MEDIA_TRACK_TYPE_TIMEDTEXT,
+                  mFetchTimedTextDataGeneration, mTimedTextTrack.mPackets, msg);
           break;
       }
 
@@ -308,7 +340,7 @@
 
           int64_t timeUs, actualTimeUs;
           const bool formatChange = true;
-          sp<AMessage> latestMeta = track->mPackets->getLatestMeta();
+          sp<AMessage> latestMeta = track->mPackets->getLatestEnqueuedMeta();
           CHECK(latestMeta != NULL && latestMeta->findInt64("timeUs", &timeUs));
           readBuffer(trackType, timeUs, &actualTimeUs, formatChange);
           readBuffer(counterpartType, -1, NULL, formatChange);
@@ -323,6 +355,74 @@
     }
 }
 
+void NuPlayer::GenericSource::fetchTextData(
+        uint32_t sendWhat,
+        media_track_type type,
+        int32_t curGen,
+        sp<AnotherPacketSource> packets,
+        sp<AMessage> msg) {
+    int32_t msgGeneration;
+    CHECK(msg->findInt32("generation", &msgGeneration));
+    if (msgGeneration != curGen) {
+        // stale
+        return;
+    }
+
+    int32_t avail;
+    if (packets->hasBufferAvailable(&avail)) {
+        return;
+    }
+
+    int64_t timeUs;
+    CHECK(msg->findInt64("timeUs", &timeUs));
+
+    int64_t subTimeUs;
+    readBuffer(type, timeUs, &subTimeUs);
+
+    int64_t delayUs = subTimeUs - timeUs;
+    if (msg->what() == kWhatFetchSubtitleData) {
+        const int64_t oneSecUs = 1000000ll;
+        delayUs -= oneSecUs;
+    }
+    sp<AMessage> msg2 = new AMessage(sendWhat, id());
+    msg2->setInt32("generation", msgGeneration);
+    msg2->post(delayUs < 0 ? 0 : delayUs);
+}
+
+void NuPlayer::GenericSource::sendTextData(
+        uint32_t what,
+        media_track_type type,
+        int32_t curGen,
+        sp<AnotherPacketSource> packets,
+        sp<AMessage> msg) {
+    int32_t msgGeneration;
+    CHECK(msg->findInt32("generation", &msgGeneration));
+    if (msgGeneration != curGen) {
+        // stale
+        return;
+    }
+
+    int64_t subTimeUs;
+    if (packets->nextBufferTime(&subTimeUs) != OK) {
+        return;
+    }
+
+    int64_t nextSubTimeUs;
+    readBuffer(type, -1, &nextSubTimeUs);
+
+    sp<ABuffer> buffer;
+    status_t dequeueStatus = packets->dequeueAccessUnit(&buffer);
+    if (dequeueStatus == OK) {
+        sp<AMessage> notify = dupNotify();
+        notify->setInt32("what", what);
+        notify->setBuffer("buffer", buffer);
+        notify->post();
+
+        const int64_t delayUs = nextSubTimeUs - subTimeUs;
+        msg->post(delayUs < 0 ? 0 : delayUs);
+    }
+}
+
 sp<MetaData> NuPlayer::GenericSource::getFormatMeta(bool audio) {
     sp<MediaSource> source = audio ? mAudioTrack.mSource : mVideoTrack.mSource;
 
@@ -357,27 +457,49 @@
         readBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO, -1ll);
     }
 
-    if (mSubtitleTrack.mSource == NULL) {
+    if (mSubtitleTrack.mSource == NULL && mTimedTextTrack.mSource == NULL) {
         return result;
     }
 
-    CHECK(mSubtitleTrack.mPackets != NULL);
+    if (mSubtitleTrack.mSource != NULL) {
+        CHECK(mSubtitleTrack.mPackets != NULL);
+    }
+    if (mTimedTextTrack.mSource != NULL) {
+        CHECK(mTimedTextTrack.mPackets != NULL);
+    }
+
     if (result != OK) {
-        mSubtitleTrack.mPackets->clear();
-        mFetchSubtitleDataGeneration++;
+        if (mSubtitleTrack.mSource != NULL) {
+            mSubtitleTrack.mPackets->clear();
+            mFetchSubtitleDataGeneration++;
+        }
+        if (mTimedTextTrack.mSource != NULL) {
+            mTimedTextTrack.mPackets->clear();
+            mFetchTimedTextDataGeneration++;
+        }
         return result;
     }
 
     int64_t timeUs;
     status_t eosResult; // ignored
     CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
-    if (!mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
+
+    if (mSubtitleTrack.mSource != NULL
+            && !mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
         sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, id());
         msg->setInt64("timeUs", timeUs);
         msg->setInt32("generation", mFetchSubtitleDataGeneration);
         msg->post();
     }
 
+    if (mTimedTextTrack.mSource != NULL
+            && !mTimedTextTrack.mPackets->hasBufferAvailable(&eosResult)) {
+        sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, id());
+        msg->setInt64("timeUs", timeUs);
+        msg->setInt32("generation", mFetchTimedTextDataGeneration);
+        msg->post();
+    }
+
     return result;
 }
 
@@ -436,20 +558,53 @@
     return format;
 }
 
+ssize_t NuPlayer::GenericSource::getSelectedTrack(media_track_type type) const {
+    const Track *track = NULL;
+    switch (type) {
+    case MEDIA_TRACK_TYPE_VIDEO:
+        track = &mVideoTrack;
+        break;
+    case MEDIA_TRACK_TYPE_AUDIO:
+        track = &mAudioTrack;
+        break;
+    case MEDIA_TRACK_TYPE_TIMEDTEXT:
+        track = &mTimedTextTrack;
+        break;
+    case MEDIA_TRACK_TYPE_SUBTITLE:
+        track = &mSubtitleTrack;
+        break;
+    default:
+        break;
+    }
+
+    if (track != NULL && track->mSource != NULL) {
+        return track->mIndex;
+    }
+
+    return -1;
+}
+
 status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select) {
-    ALOGV("selectTrack: %zu", trackIndex);
+    ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
     if (trackIndex >= mSources.size()) {
         return BAD_INDEX;
     }
 
     if (!select) {
-        if (mSubtitleTrack.mSource == NULL || trackIndex != mSubtitleTrack.mIndex) {
+        Track* track = NULL;
+        if (mSubtitleTrack.mSource != NULL && trackIndex == mSubtitleTrack.mIndex) {
+            track = &mSubtitleTrack;
+            mFetchSubtitleDataGeneration++;
+        } else if (mTimedTextTrack.mSource != NULL && trackIndex == mTimedTextTrack.mIndex) {
+            track = &mTimedTextTrack;
+            mFetchTimedTextDataGeneration++;
+        }
+        if (track == NULL) {
             return INVALID_OPERATION;
         }
-        mSubtitleTrack.mSource->stop();
-        mSubtitleTrack.mSource = NULL;
-        mSubtitleTrack.mPackets->clear();
-        mFetchSubtitleDataGeneration++;
+        track->mSource->stop();
+        track->mSource = NULL;
+        track->mPackets->clear();
         return OK;
     }
 
@@ -458,22 +613,31 @@
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
     if (!strncasecmp(mime, "text/", 5)) {
-        if (mSubtitleTrack.mSource != NULL && mSubtitleTrack.mIndex == trackIndex) {
+        bool isSubtitle = strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP);
+        Track *track = isSubtitle ? &mSubtitleTrack : &mTimedTextTrack;
+        if (track->mSource != NULL && track->mIndex == trackIndex) {
             return OK;
         }
-        mSubtitleTrack.mIndex = trackIndex;
-        if (mSubtitleTrack.mSource != NULL) {
-            mSubtitleTrack.mSource->stop();
+        track->mIndex = trackIndex;
+        if (track->mSource != NULL) {
+            track->mSource->stop();
         }
-        mSubtitleTrack.mSource = mSources.itemAt(trackIndex);
-        mSubtitleTrack.mSource->start();
-        if (mSubtitleTrack.mPackets == NULL) {
-            mSubtitleTrack.mPackets = new AnotherPacketSource(mSubtitleTrack.mSource->getFormat());
+        track->mSource = mSources.itemAt(trackIndex);
+        track->mSource->start();
+        if (track->mPackets == NULL) {
+            track->mPackets = new AnotherPacketSource(track->mSource->getFormat());
         } else {
-            mSubtitleTrack.mPackets->clear();
+            track->mPackets->clear();
+            track->mPackets->setFormat(track->mSource->getFormat());
 
         }
-        mFetchSubtitleDataGeneration++;
+
+        if (isSubtitle) {
+            mFetchSubtitleDataGeneration++;
+        } else {
+            mFetchTimedTextDataGeneration++;
+        }
+
         return OK;
     } else if (!strncasecmp(mime, "audio/", 6) || !strncasecmp(mime, "video/", 6)) {
         bool audio = !strncasecmp(mime, "audio/", 6);
@@ -540,12 +704,19 @@
         memcpy(abEnd, &numPageSamples, sizeof(numPageSamples));
     }
 
+    sp<AMessage> meta = ab->meta();
+
     int64_t timeUs;
     CHECK(mb->meta_data()->findInt64(kKeyTime, &timeUs));
-
-    sp<AMessage> meta = ab->meta();
     meta->setInt64("timeUs", timeUs);
 
+    if (trackType == MEDIA_TRACK_TYPE_TIMEDTEXT) {
+        const char *mime;
+        CHECK(mTimedTextTrack.mSource != NULL
+                && mTimedTextTrack.mSource->getFormat()->findCString(kKeyMIMEType, &mime));
+        meta->setString("mime", mime);
+    }
+
     int64_t durationUs;
     if (mb->meta_data()->findInt64(kKeyDuration, &durationUs)) {
         meta->setInt64("durationUs", durationUs);
@@ -578,6 +749,9 @@
         case MEDIA_TRACK_TYPE_SUBTITLE:
             track = &mSubtitleTrack;
             break;
+        case MEDIA_TRACK_TYPE_TIMEDTEXT:
+            track = &mTimedTextTrack;
+            break;
         default:
             TRESPASS();
     }
@@ -613,7 +787,9 @@
             // formatChange && seeking: track whose source is changed during selection
             // formatChange && !seeking: track whose source is not changed during selection
             // !formatChange: normal seek
-            if ((seeking || formatChange) && trackType != MEDIA_TRACK_TYPE_SUBTITLE) {
+            if ((seeking || formatChange)
+                    && (trackType == MEDIA_TRACK_TYPE_AUDIO
+                    || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
                 ATSParser::DiscontinuityType type = formatChange
                         ? (seeking
                                 ? ATSParser::DISCONTINUITY_FORMATCHANGE
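
The new fetchTextData()/sendTextData() helpers above rely on a per-track generation counter: every posted message carries the generation it was created under, and the handler drops it once the counter has been bumped by a seek or a track (de)selection. A minimal standalone sketch of that pattern (plain C++, independent of the AOSP message classes; all names here are illustrative) looks like this:

#include <cstdint>
#include <cstdio>

// Illustration of the generation-token pattern used by fetchTextData() /
// sendTextData(): bumping the counter invalidates every message posted earlier.
struct TextMessage {
    int32_t generation;   // generation the message was posted under
    int64_t timeUs;       // media time the message refers to
};

struct TextTrackState {
    int32_t fetchGeneration = 0;

    TextMessage post(int64_t timeUs) const {
        return TextMessage{fetchGeneration, timeUs};
    }

    // Called on seek or track (de)selection; outstanding messages become stale.
    void invalidateOutstanding() { ++fetchGeneration; }

    bool handle(const TextMessage &msg) const {
        if (msg.generation != fetchGeneration) {
            return false;  // stale, drop it
        }
        std::printf("deliver text data for t=%lld us\n", (long long)msg.timeUs);
        return true;
    }
};

int main() {
    TextTrackState track;
    TextMessage m = track.post(1000000);
    track.invalidateOutstanding();                   // e.g. a seek happened
    std::printf("handled: %d\n", track.handle(m));   // prints 0: message dropped
    return 0;
}
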
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 4e25d55..44d690e 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -30,22 +30,19 @@
 struct AnotherPacketSource;
 struct ARTSPController;
 struct DataSource;
+struct IMediaHTTPService;
 struct MediaSource;
 class MediaBuffer;
 
 struct NuPlayer::GenericSource : public NuPlayer::Source {
-    GenericSource(
-            const sp<AMessage> &notify,
+    GenericSource(const sp<AMessage> &notify, bool uidValid, uid_t uid);
+
+    status_t setDataSource(
             const sp<IMediaHTTPService> &httpService,
             const char *url,
-            const KeyedVector<String8, String8> *headers,
-            bool isWidevine = false,
-            bool uidValid = false,
-            uid_t uid = 0);
+            const KeyedVector<String8, String8> *headers);
 
-    GenericSource(
-            const sp<AMessage> &notify,
-            int fd, int64_t offset, int64_t length);
+    status_t setDataSource(int fd, int64_t offset, int64_t length);
 
     virtual void prepareAsync();
 
@@ -58,6 +55,7 @@
     virtual status_t getDuration(int64_t *durationUs);
     virtual size_t getTrackCount() const;
     virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
+    virtual ssize_t getSelectedTrack(media_track_type type) const;
     virtual status_t selectTrack(size_t trackIndex, bool select);
     virtual status_t seekTo(int64_t seekTimeUs);
 
@@ -73,7 +71,9 @@
 private:
     enum {
         kWhatFetchSubtitleData,
+        kWhatFetchTimedTextData,
         kWhatSendSubtitleData,
+        kWhatSendTimedTextData,
         kWhatChangeAVSource,
     };
 
@@ -88,15 +88,35 @@
     Track mAudioTrack;
     Track mVideoTrack;
     Track mSubtitleTrack;
+    Track mTimedTextTrack;
 
     int32_t mFetchSubtitleDataGeneration;
+    int32_t mFetchTimedTextDataGeneration;
     int64_t mDurationUs;
     bool mAudioIsVorbis;
     bool mIsWidevine;
     bool mUIDValid;
     uid_t mUID;
+    sp<IMediaHTTPService> mHTTPService;
+    AString mUri;
+    KeyedVector<String8, String8> mUriHeaders;
+    int mFd;
+    int64_t mOffset;
+    int64_t mLength;
 
-    void initFromDataSource(const sp<DataSource> &dataSource);
+    void resetDataSource();
+
+    status_t initFromDataSource(
+            const sp<DataSource> &dataSource,
+            const char *mime);
+
+    void fetchTextData(
+            uint32_t what, media_track_type type,
+            int32_t curGen, sp<AnotherPacketSource> packets, sp<AMessage> msg);
+
+    void sendTextData(
+            uint32_t what, media_track_type type,
+            int32_t curGen, sp<AnotherPacketSource> packets, sp<AMessage> msg);
 
     sp<ABuffer> mediaBufferToABuffer(
             MediaBuffer *mbuf,
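
A hedged usage sketch for the getSelectedTrack(media_track_type) accessor declared above. It assumes the GenericSource header from this change and a source that has already been prepared; a return value of -1 means no track of that type is currently selected.

// Sketch only; "source" is assumed to be a prepared NuPlayer::GenericSource.
void logSelectedTextTracks(const sp<NuPlayer::GenericSource> &source) {
    ssize_t subtitleIndex = source->getSelectedTrack(MEDIA_TRACK_TYPE_SUBTITLE);
    ssize_t timedTextIndex = source->getSelectedTrack(MEDIA_TRACK_TYPE_TIMEDTEXT);

    // -1 means no track of that type is selected.
    ALOGV("selected subtitle track: %zd, timed text track: %zd",
            subtitleIndex, timedTextIndex);
}
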
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 58d0138..fe115c6 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -207,6 +207,7 @@
         const sp<IMediaHTTPService> &httpService,
         const char *url,
         const KeyedVector<String8, String8> *headers) {
+
     sp<AMessage> msg = new AMessage(kWhatSetDataSource, id());
     size_t len = strlen(url);
 
@@ -224,14 +225,21 @@
                     || strstr(url, ".sdp?"))) {
         source = new RTSPSource(
                 notify, httpService, url, headers, mUIDValid, mUID, true);
-    } else if ((!strncasecmp(url, "widevine://", 11))) {
-        source = new GenericSource(notify, httpService, url, headers,
-                true /* isWidevine */, mUIDValid, mUID);
-        mSourceFlags |= Source::FLAG_SECURE;
     } else {
-        source = new GenericSource(notify, httpService, url, headers);
-    }
+        sp<GenericSource> genericSource =
+                new GenericSource(notify, mUIDValid, mUID);
+        // Don't set FLAG_SECURE on mSourceFlags here for widevine.
+        // The correct flags will be updated in the Source::kWhatFlagsChanged
+        // handler when GenericSource is prepared.
 
+        status_t err = genericSource->setDataSource(httpService, url, headers);
+
+        if (err == OK) {
+            source = genericSource;
+        } else {
+            ALOGE("Failed to set data source!");
+        }
+    }
     msg->setObject("source", source);
     msg->post();
 }
@@ -241,7 +249,16 @@
 
     sp<AMessage> notify = new AMessage(kWhatSourceNotify, id());
 
-    sp<Source> source = new GenericSource(notify, fd, offset, length);
+    sp<GenericSource> source =
+            new GenericSource(notify, mUIDValid, mUID);
+
+    status_t err = source->setDataSource(fd, offset, length);
+
+    if (err != OK) {
+        ALOGE("Failed to set data source!");
+        source = NULL;
+    }
+
     msg->setObject("source", source);
     msg->post();
 }
@@ -350,17 +367,20 @@
 
             CHECK(mSource == NULL);
 
+            status_t err = OK;
             sp<RefBase> obj;
             CHECK(msg->findObject("source", &obj));
-
-            mSource = static_cast<Source *>(obj.get());
-
-            looper()->registerHandler(mSource);
+            if (obj != NULL) {
+                mSource = static_cast<Source *>(obj.get());
+                looper()->registerHandler(mSource);
+            } else {
+                err = UNKNOWN_ERROR;
+            }
 
             CHECK(mDriver != NULL);
             sp<NuPlayerDriver> driver = mDriver.promote();
             if (driver != NULL) {
-                driver->notifySetDataSourceCompleted(OK);
+                driver->notifySetDataSourceCompleted(err);
             }
             break;
         }
@@ -749,6 +769,15 @@
                             ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
                                     mime.c_str(), audioFormat);
 
+                            int32_t aacProfile = -1;
+                            if (audioFormat == AUDIO_FORMAT_AAC
+                                    && format->findInt32("aac-profile", &aacProfile)) {
+                                // Redefine AAC format as per aac profile
+                                mapAACProfileToAudioFormat(
+                                        audioFormat,
+                                        aacProfile);
+                            }
+
                             flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
 
                             offloadInfo.duration_us = -1;
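
The "aac-profile" handling above only shows the call site; the body of mapAACProfileToAudioFormat is not part of this hunk. Purely to illustrate the idea (refining the generic AUDIO_FORMAT_AAC offload format by profile), a mapping of that shape might look like the sketch below, where the helper name, the sub-format constants, and the MPEG-4 object-type numbering are assumptions rather than values taken from this change.

// Illustrative only -- not the actual mapAACProfileToAudioFormat() helper.
// Refines the generic AAC offload format according to the reported profile.
static void refineAacOffloadFormat(audio_format_t &format, int32_t aacProfile) {
    switch (aacProfile) {
    case 2:   // AAC-LC object type (assumed numbering)
        format = AUDIO_FORMAT_AAC_LC;     // assumed sub-format constant
        break;
    case 5:   // HE-AAC v1 (SBR), assumed numbering
        format = AUDIO_FORMAT_AAC_HE_V1;  // assumed sub-format constant
        break;
    case 29:  // HE-AAC v2 (SBR + PS), assumed numbering
        format = AUDIO_FORMAT_AAC_HE_V2;  // assumed sub-format constant
        break;
    default:
        break;                            // keep the generic AUDIO_FORMAT_AAC
    }
}
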
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 1808d07..503ce81 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -51,6 +51,48 @@
 
 namespace android {
 
+// OMX errors are directly mapped into status_t range if
+// there is no corresponding MediaError status code.
+// Use the statusFromOMXError(int32_t omxError) function.
+//
+// Currently this is a direct map.
+// See frameworks/native/include/media/openmax/OMX_Core.h
+//
+// Vendor OMX errors     from 0x90000000 - 0x9000FFFF
+// Extension OMX errors  from 0x8F000000 - 0x90000000
+// Standard OMX errors   from 0x80001000 - 0x80001024 (0x80001024 current)
+//
+
+// Returns true if err is a recognized OMX error code.
+// As OMX errors are OMX_S32 values, the parameter is an int32_t.
+static inline bool isOMXError(int32_t err) {
+    return (ERROR_CODEC_MIN <= err && err <= ERROR_CODEC_MAX);
+}
+
+// converts an OMX error to a status_t
+static inline status_t statusFromOMXError(int32_t omxError) {
+    switch (omxError) {
+    case OMX_ErrorInvalidComponentName:
+    case OMX_ErrorComponentNotFound:
+        return NAME_NOT_FOUND; // can trigger illegal argument error for provided names.
+    default:
+        return isOMXError(omxError) ? omxError : 0; // no translation required
+    }
+}
+
+// checks and converts status_t to a non-side-effect status_t
+static inline status_t makeNoSideEffectStatus(status_t err) {
+    switch (err) {
+    // the following errors have side effects and may come
+    // from other code modules. Remap for safety reasons.
+    case INVALID_OPERATION:
+    case DEAD_OBJECT:
+        return UNKNOWN_ERROR;
+    default:
+        return err;
+    }
+}
+
 template<class T>
 static void InitOMXParams(T *params) {
     params->nSize = sizeof(T);
@@ -1848,6 +1890,17 @@
             return err;
         }
 
+        // substitute back flexible color format to codec supported format
+        OMX_U32 flexibleEquivalent;
+        if (compressionFormat == OMX_VIDEO_CodingUnused &&
+                isFlexibleColorFormat(
+                        mOMX, mNode, format.eColorFormat, &flexibleEquivalent) &&
+                colorFormat == flexibleEquivalent) {
+            ALOGI("[%s] using color format %#x in place of %#x",
+                    mComponentName.c_str(), format.eColorFormat, colorFormat);
+            colorFormat = format.eColorFormat;
+        }
+
         // The following assertion is violated by TI's video decoder.
         // CHECK_EQ(format.nIndex, index);
 
@@ -2824,7 +2877,7 @@
 }
 
 // static
-void ACodec::describeDefaultColorFormat(DescribeColorFormatParams &params) {
+bool ACodec::describeDefaultColorFormat(DescribeColorFormatParams &params) {
     MediaImage &image = params.sMediaImage;
     memset(&image, 0, sizeof(image));
 
@@ -2836,7 +2889,7 @@
     if (params.nStride == 0 || params.nSliceHeight == 0) {
         ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
                 fmt, fmt, params.nStride, params.nSliceHeight);
-        return;
+        return false;
     }
 
     image.mWidth = params.nFrameWidth;
@@ -2848,7 +2901,7 @@
         fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
         fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar) {
         ALOGW("do not know color format 0x%x = %d", fmt, fmt);
-        return;
+        return false;
     }
 
     // set-up YUV format
@@ -2898,6 +2951,67 @@
         default:
             TRESPASS();
     }
+    return true;
+}
+
+// static
+bool ACodec::describeColorFormat(
+        const sp<IOMX> &omx, IOMX::node_id node,
+        DescribeColorFormatParams &describeParams)
+{
+    OMX_INDEXTYPE describeColorFormatIndex;
+    if (omx->getExtensionIndex(
+            node, "OMX.google.android.index.describeColorFormat",
+            &describeColorFormatIndex) != OK ||
+        omx->getParameter(
+            node, describeColorFormatIndex,
+            &describeParams, sizeof(describeParams)) != OK) {
+        return describeDefaultColorFormat(describeParams);
+    }
+    return describeParams.sMediaImage.mType !=
+            MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN;
+}
+
+// static
+bool ACodec::isFlexibleColorFormat(
+         const sp<IOMX> &omx, IOMX::node_id node,
+         uint32_t colorFormat, OMX_U32 *flexibleEquivalent) {
+    DescribeColorFormatParams describeParams;
+    InitOMXParams(&describeParams);
+    describeParams.eColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
+    // reasonable dummy values
+    describeParams.nFrameWidth = 128;
+    describeParams.nFrameHeight = 128;
+    describeParams.nStride = 128;
+    describeParams.nSliceHeight = 128;
+
+    CHECK(flexibleEquivalent != NULL);
+
+    if (!describeColorFormat(omx, node, describeParams)) {
+        return false;
+    }
+
+    const MediaImage &img = describeParams.sMediaImage;
+    if (img.mType == MediaImage::MEDIA_IMAGE_TYPE_YUV) {
+        if (img.mNumPlanes != 3 ||
+            img.mPlane[img.Y].mHorizSubsampling != 1 ||
+            img.mPlane[img.Y].mVertSubsampling != 1) {
+            return false;
+        }
+
+        // YUV 420
+        if (img.mPlane[img.U].mHorizSubsampling == 2
+                && img.mPlane[img.U].mVertSubsampling == 2
+                && img.mPlane[img.V].mHorizSubsampling == 2
+                && img.mPlane[img.V].mVertSubsampling == 2) {
+            // possible flexible YUV420 format
+            if (img.mBitDepth <= 8) {
+               *flexibleEquivalent = OMX_COLOR_FormatYUV420Flexible;
+               return true;
+            }
+        }
+    }
+    return false;
 }
 
 status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
@@ -2927,7 +3041,6 @@
                     notify->setInt32("slice-height", videoDef->nSliceHeight);
                     notify->setInt32("color-format", videoDef->eColorFormat);
 
-
                     DescribeColorFormatParams describeParams;
                     InitOMXParams(&describeParams);
                     describeParams.eColorFormat = videoDef->eColorFormat;
@@ -2936,17 +3049,7 @@
                     describeParams.nStride = videoDef->nStride;
                     describeParams.nSliceHeight = videoDef->nSliceHeight;
 
-                    OMX_INDEXTYPE describeColorFormatIndex;
-                    if (mOMX->getExtensionIndex(
-                            mNode, "OMX.google.android.index.describeColorFormat",
-                            &describeColorFormatIndex) ||
-                        mOMX->getParameter(
-                            mNode, describeColorFormatIndex,
-                            &describeParams, sizeof(describeParams))) {
-                        describeDefaultColorFormat(describeParams);
-                    }
-
-                    if (describeParams.sMediaImage.mType != MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN) {
+                    if (describeColorFormat(mOMX, mNode, describeParams)) {
                         notify->setBuffer(
                                 "image-data",
                                 ABuffer::CreateAsCopy(
@@ -3268,8 +3371,18 @@
 void ACodec::signalError(OMX_ERRORTYPE error, status_t internalError) {
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", CodecBase::kWhatError);
-    notify->setInt32("omx-error", error);
+    ALOGE("signalError(omxError %#x, internalError %d)", error, internalError);
+
+    if (internalError == UNKNOWN_ERROR) { // find better error code
+        const status_t omxStatus = statusFromOMXError(error);
+        if (omxStatus != 0) {
+            internalError = omxStatus;
+        } else {
+            ALOGW("Invalid OMX error %#x", error);
+        }
+    }
     notify->setInt32("err", internalError);
+    notify->setInt32("actionCode", ACTION_CODE_FATAL); // could translate from OMX error.
     notify->post();
 }
 
@@ -3493,6 +3606,7 @@
         case ACodec::kWhatCreateInputSurface:
         case ACodec::kWhatSignalEndOfInputStream:
         {
+            // This may result in an app illegal state exception.
             ALOGE("Message 0x%x was not handled", msg->what());
             mCodec->signalError(OMX_ErrorUndefined, INVALID_OPERATION);
             return true;
@@ -3500,6 +3614,7 @@
 
         case ACodec::kWhatOMXDied:
         {
+            // This will result in kFlagSawMediaServerDie handling in MediaCodec.
             ALOGE("OMX/mediaserver died, signalling error!");
             mCodec->signalError(OMX_ErrorResourcesLost, DEAD_OBJECT);
             break;
@@ -3598,7 +3713,13 @@
 
     ALOGE("[%s] ERROR(0x%08lx)", mCodec->mComponentName.c_str(), data1);
 
-    mCodec->signalError((OMX_ERRORTYPE)data1);
+    // verify OMX component sends back an error we expect.
+    OMX_ERRORTYPE omxError = (OMX_ERRORTYPE)data1;
+    if (!isOMXError(omxError)) {
+        ALOGW("Invalid OMX error %#x", omxError);
+        omxError = OMX_ErrorUndefined;
+    }
+    mCodec->signalError(omxError);
 
     return true;
 }
@@ -4041,7 +4162,7 @@
                     info->mGraphicBuffer.get(), -1)) == OK) {
             info->mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
         } else {
-            mCodec->signalError(OMX_ErrorUndefined, err);
+            mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
             info->mStatus = BufferInfo::OWNED_BY_US;
         }
     } else {
@@ -4413,7 +4534,7 @@
         ALOGE("[%s] configureCodec returning error %d",
               mCodec->mComponentName.c_str(), err);
 
-        mCodec->signalError(OMX_ErrorUndefined, err);
+        mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
         return false;
     }
 
@@ -4560,7 +4681,7 @@
              "(error 0x%08x)",
              err);
 
-        mCodec->signalError(OMX_ErrorUndefined, err);
+        mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
 
         mCodec->changeState(mCodec->mLoadedState);
     }
@@ -5088,7 +5209,7 @@
                          "port reconfiguration (error 0x%08x)",
                          err);
 
-                    mCodec->signalError(OMX_ErrorUndefined, err);
+                    mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
 
                     // This is technically not correct, but appears to be
                     // the only way to free the component instance.
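
To make the behaviour of the new error helpers concrete, here is a hypothetical check that could sit next to them inside ACodec.cpp (isOMXError() and statusFromOMXError() are file-static, so they are not callable from elsewhere). The error constants are the ones referenced in this change; the function itself is an assumption for illustration.

// Sketch only: exercises the new error-mapping helpers inside ACodec.cpp.
static void demonstrateOmxErrorMapping() {
    // A name lookup failure is remapped to a status_t the upper layers know.
    CHECK_EQ(statusFromOMXError(OMX_ErrorComponentNotFound),
             (status_t)NAME_NOT_FOUND);

    // Recognized codec-range errors pass through unchanged.
    CHECK(isOMXError(OMX_ErrorUndefined));
    CHECK_EQ(statusFromOMXError(OMX_ErrorUndefined),
             (status_t)OMX_ErrorUndefined);

    // Values outside the recognized OMX ranges yield 0 ("no translation").
    CHECK(!isOMXError(0x12345678));
    CHECK_EQ(statusFromOMXError(0x12345678), (status_t)0);
}
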
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index a67fabe..804f131 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -155,12 +155,12 @@
     }
 
     mStarted = false;
+    mFrameAvailableCondition.signal();
+
     mRecord->stop();
     waitOutstandingEncodingFrames_l();
     releaseQueuedFrames_l();
 
-    mFrameAvailableCondition.signal();
-
     return OK;
 }
 
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 6e0f37a..908cdca 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -13,6 +13,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DataSource"
 
 #include "include/AMRExtractor.h"
 
@@ -33,6 +35,7 @@
 
 #include <media/IMediaHTTPConnection.h>
 #include <media/IMediaHTTPService.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/DataURISource.h>
@@ -182,7 +185,12 @@
 sp<DataSource> DataSource::CreateFromURI(
         const sp<IMediaHTTPService> &httpService,
         const char *uri,
-        const KeyedVector<String8, String8> *headers) {
+        const KeyedVector<String8, String8> *headers,
+        AString *sniffedMIME) {
+    if (sniffedMIME != NULL) {
+        *sniffedMIME = "";
+    }
+
     bool isWidevine = !strncasecmp("widevine://", uri, 11);
 
     sp<DataSource> source;
@@ -202,6 +210,7 @@
         }
 
         if (httpSource->connect(uri, headers) != OK) {
+            ALOGE("Failed to connect http source!");
             return NULL;
         }
 
@@ -214,9 +223,76 @@
                         &copy, &cacheConfig, &disconnectAtHighwatermark);
             }
 
-            source = new NuCachedSource2(
+            sp<NuCachedSource2> cachedSource = new NuCachedSource2(
                     httpSource,
                     cacheConfig.isEmpty() ? NULL : cacheConfig.string());
+
+            String8 contentType = httpSource->getMIMEType();
+
+            if (strncasecmp(contentType.string(), "audio/", 6)) {
+                // We skip this prefill for streams that appear to be audio-only,
+                // so that even low bandwidth streams start playing back fairly
+                // instantly.
+
+                // We're going to prefill the cache before the caller instantiates
+                // the extractor, as that is an operation that could otherwise
+                // block on the datasource for a significant amount of time.
+                // During that time we'd be unable to abort the preparation phase
+                // without this prefill.
+
+                // Initially make sure we have at least 192 KB for the sniff
+                // to complete without blocking.
+                static const size_t kMinBytesForSniffing = 192 * 1024;
+
+                off64_t metaDataSize = -1ll;
+                for (;;) {
+                    status_t finalStatus;
+                    size_t cachedDataRemaining =
+                            cachedSource->approxDataRemaining(&finalStatus);
+
+                    if (finalStatus != OK || (metaDataSize >= 0
+                            && (off64_t)cachedDataRemaining >= metaDataSize)) {
+                        ALOGV("stop caching, status %d, "
+                                "metaDataSize %lld, cachedDataRemaining %zu",
+                                finalStatus, metaDataSize, cachedDataRemaining);
+                        break;
+                    }
+
+                    ALOGV("now cached %zu bytes of data", cachedDataRemaining);
+
+                    if (metaDataSize < 0
+                            && cachedDataRemaining >= kMinBytesForSniffing) {
+                        String8 tmp;
+                        float confidence;
+                        sp<AMessage> meta;
+                        if (!cachedSource->sniff(&tmp, &confidence, &meta)) {
+                            return NULL;
+                        }
+
+                        // We successfully sniffed the file's container type;
+                        // remember this mime type so we don't have to sniff
+                        // it again when we call MediaExtractor::Create().
+                        if (sniffedMIME != NULL) {
+                            *sniffedMIME = tmp.string();
+                        }
+
+                        if (meta == NULL
+                                || !meta->findInt64("meta-data-size",
+                                     reinterpret_cast<int64_t*>(&metaDataSize))) {
+                            metaDataSize = kDefaultMetaSize;
+                        }
+
+                        if (metaDataSize < 0ll) {
+                            ALOGE("invalid metaDataSize = %lld bytes", metaDataSize);
+                            return NULL;
+                        }
+                    }
+
+                    usleep(200000);
+                }
+            }
+
+            source = cachedSource;
         } else {
             // We do not want that prefetching, caching, datasource wrapper
             // in the widevine:// case.
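
The new sniffedMIME out-parameter lets callers reuse the container type identified during the cache prefill instead of sniffing twice, mirroring how GenericSource::prepareAsync() consumes it earlier in this change. A hedged sketch of that flow; the helper name is hypothetical and the stagefright headers used above are assumed:

// Sketch only: open an HTTP stream and reuse the prefill's sniff result.
status_t openHttpStream(
        const sp<IMediaHTTPService> &httpService,
        const char *uri,
        const KeyedVector<String8, String8> *headers,
        sp<MediaExtractor> *outExtractor) {
    AString sniffedMIME;
    sp<DataSource> dataSource = DataSource::CreateFromURI(
            httpService, uri, headers, &sniffedMIME);
    if (dataSource == NULL) {
        return UNKNOWN_ERROR;  // connect or cache prefill failed
    }

    // Pass the MIME type identified during the prefill so the container is
    // not sniffed a second time.
    *outExtractor = MediaExtractor::Create(
            dataSource, sniffedMIME.empty() ? NULL : sniffedMIME.c_str());
    return *outExtractor != NULL ? OK : UNKNOWN_ERROR;
}
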
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 7c02959..42691b9 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -113,24 +113,26 @@
 }
 // static
 sp<MediaCodec> MediaCodec::CreateByType(
-        const sp<ALooper> &looper, const char *mime, bool encoder) {
+        const sp<ALooper> &looper, const char *mime, bool encoder, status_t *err) {
     sp<MediaCodec> codec = new MediaCodec(looper);
-    if (codec->init(mime, true /* nameIsType */, encoder) != OK) {
-        return NULL;
-    }
 
-    return codec;
+    const status_t ret = codec->init(mime, true /* nameIsType */, encoder);
+    if (err != NULL) {
+        *err = ret;
+    }
+    return ret == OK ? codec : NULL; // NULL deallocates codec.
 }
 
 // static
 sp<MediaCodec> MediaCodec::CreateByComponentName(
-        const sp<ALooper> &looper, const char *name) {
+        const sp<ALooper> &looper, const char *name, status_t *err) {
     sp<MediaCodec> codec = new MediaCodec(looper);
-    if (codec->init(name, false /* nameIsType */, false /* encoder */) != OK) {
-        return NULL;
-    }
 
-    return codec;
+    const status_t ret = codec->init(name, false /* nameIsType */, false /* encoder */);
+    if (err != NULL) {
+        *err = ret;
+    }
+    return ret == OK ? codec : NULL; // NULL deallocates codec.
 }
 
 MediaCodec::MediaCodec(const sp<ALooper> &looper)
@@ -139,6 +141,7 @@
       mCodec(NULL),
       mReplyID(0),
       mFlags(0),
+      mStickyError(OK),
       mSoftRenderer(NULL),
       mBatteryStatNotified(false),
       mIsVideo(false),
@@ -195,16 +198,16 @@
         if (tmp.endsWith(".secure")) {
             tmp.erase(tmp.size() - 7, 7);
         }
-        const MediaCodecList *mcl = MediaCodecList::getInstance();
+        const sp<IMediaCodecList> mcl = MediaCodecList::getInstance();
         ssize_t codecIdx = mcl->findCodecByName(tmp.c_str());
         if (codecIdx >= 0) {
-            Vector<AString> types;
-            if (mcl->getSupportedTypes(codecIdx, &types) == OK) {
-                for (size_t i = 0; i < types.size(); i++) {
-                    if (types[i].startsWith("video/")) {
-                        needDedicatedLooper = true;
-                        break;
-                    }
+            const sp<MediaCodecInfo> info = mcl->getCodecInfo(codecIdx);
+            Vector<AString> mimes;
+            info->getSupportedMimes(&mimes);
+            for (size_t i = 0; i < mimes.size(); i++) {
+                if (mimes[i].startsWith("video/")) {
+                    needDedicatedLooper = true;
+                    break;
                 }
             }
         }
@@ -330,6 +333,7 @@
     mLooper->unregisterHandler(id());
 
     mFlags = 0;    // clear all flags
+    mStickyError = OK;
 
     // reset state not reset by setState(UNINITIALIZED)
     mReplyID = 0;
@@ -620,10 +624,12 @@
 
 bool MediaCodec::handleDequeueInputBuffer(uint32_t replyID, bool newRequest) {
     if (!isExecuting() || (mFlags & kFlagIsAsync)
-            || (mFlags & kFlagStickyError)
             || (newRequest && (mFlags & kFlagDequeueInputPending))) {
         PostReplyWithError(replyID, INVALID_OPERATION);
         return true;
+    } else if (mFlags & kFlagStickyError) {
+        PostReplyWithError(replyID, getStickyError());
+        return true;
     }
 
     ssize_t index = dequeuePortBuffer(kPortIndexInput);
@@ -644,9 +650,10 @@
     sp<AMessage> response = new AMessage;
 
     if (!isExecuting() || (mFlags & kFlagIsAsync)
-            || (mFlags & kFlagStickyError)
             || (newRequest && (mFlags & kFlagDequeueOutputPending))) {
         response->setInt32("err", INVALID_OPERATION);
+    } else if (mFlags & kFlagStickyError) {
+        response->setInt32("err", getStickyError());
     } else if (mFlags & kFlagOutputBuffersChanged) {
         response->setInt32("err", INFO_OUTPUT_BUFFERS_CHANGED);
         mFlags &= ~kFlagOutputBuffersChanged;
@@ -705,16 +712,12 @@
             switch (what) {
                 case CodecBase::kWhatError:
                 {
-                    int32_t omxError, internalError;
-                    CHECK(msg->findInt32("omx-error", &omxError));
-                    CHECK(msg->findInt32("err", &internalError));
+                    int32_t err, actionCode;
+                    CHECK(msg->findInt32("err", &err));
+                    CHECK(msg->findInt32("actionCode", &actionCode));
 
-                    ALOGE("Codec reported an error. "
-                          "(omx error 0x%08x, internalError %d)",
-                          omxError, internalError);
-
-                    if (omxError == OMX_ErrorResourcesLost
-                            && internalError == DEAD_OBJECT) {
+                    ALOGE("Codec reported err %#x, actionCode %d", err, actionCode);
+                    if (err == DEAD_OBJECT) {
                         mFlags |= kFlagSawMediaServerDie;
                     }
 
@@ -774,15 +777,24 @@
                         {
                             sendErrorReponse = false;
 
-                            mFlags |= kFlagStickyError;
+                            setStickyError(err);
                             postActivityNotificationIfPossible();
 
                             cancelPendingDequeueOperations();
 
                             if (mFlags & kFlagIsAsync) {
-                                onError(omxError, 0);
+                                onError(err, actionCode);
                             }
-                            setState(UNINITIALIZED);
+                            switch (actionCode) {
+                            case ACTION_CODE_TRANSIENT:
+                                break;
+                            case ACTION_CODE_RECOVERABLE:
+                                setState(INITIALIZED);
+                                break;
+                            default:
+                                setState(UNINITIALIZED);
+                                break;
+                            }
                             break;
                         }
 
@@ -790,19 +802,32 @@
                         {
                             sendErrorReponse = false;
 
-                            mFlags |= kFlagStickyError;
+                            setStickyError(err);
                             postActivityNotificationIfPossible();
 
-                            if (mFlags & kFlagIsAsync) {
-                                onError(omxError, 0);
+                            // An error received in the uninitialized state is always fatal.
+                            if (mState == UNINITIALIZED) {
+                                actionCode = ACTION_CODE_FATAL;
                             }
-                            setState(UNINITIALIZED);
+                            if (mFlags & kFlagIsAsync) {
+                                onError(err, actionCode);
+                            }
+                            switch (actionCode) {
+                            case ACTION_CODE_TRANSIENT:
+                                break;
+                            case ACTION_CODE_RECOVERABLE:
+                                setState(INITIALIZED);
+                                break;
+                            default:
+                                setState(UNINITIALIZED);
+                                break;
+                            }
                             break;
                         }
                     }
 
                     if (sendErrorReponse) {
-                        PostReplyWithError(mReplyID, UNKNOWN_ERROR);
+                        PostReplyWithError(mReplyID, err);
                     }
                     break;
                 }
@@ -1009,7 +1034,7 @@
                             ALOGE("queueCSDInputBuffer failed w/ error %d",
                                   err);
 
-                            mFlags |= kFlagStickyError;
+                            setStickyError(err);
                             postActivityNotificationIfPossible();
 
                             cancelPendingDequeueOperations();
@@ -1401,9 +1426,12 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (!isExecuting() || (mFlags & kFlagStickyError)) {
+            if (!isExecuting()) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
+            } else if (mFlags & kFlagStickyError) {
+                PostReplyWithError(replyID, getStickyError());
+                break;
             }
 
             status_t err = onQueueInputBuffer(msg);
@@ -1472,9 +1500,12 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (!isExecuting() || (mFlags & kFlagStickyError)) {
+            if (!isExecuting()) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
+            } else if (mFlags & kFlagStickyError) {
+                PostReplyWithError(replyID, getStickyError());
+                break;
             }
 
             status_t err = onReleaseOutputBuffer(msg);
@@ -1488,9 +1519,12 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (!isExecuting() || (mFlags & kFlagStickyError)) {
+            if (!isExecuting()) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
+            } else if (mFlags & kFlagStickyError) {
+                PostReplyWithError(replyID, getStickyError());
+                break;
             }
 
             mReplyID = replyID;
@@ -1503,10 +1537,12 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (!isExecuting() || (mFlags & kFlagIsAsync)
-                    || (mFlags & kFlagStickyError)) {
+            if (!isExecuting() || (mFlags & kFlagIsAsync)) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
+            } else if (mFlags & kFlagStickyError) {
+                PostReplyWithError(replyID, getStickyError());
+                break;
             }
 
             int32_t portIndex;
@@ -1535,9 +1571,12 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (!isExecuting() || (mFlags & kFlagStickyError)) {
+            if (!isExecuting()) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
+            } else if (mFlags & kFlagStickyError) {
+                PostReplyWithError(replyID, getStickyError());
+                break;
             }
 
             mReplyID = replyID;
@@ -1561,10 +1600,12 @@
             if ((mState != CONFIGURED && mState != STARTING &&
                  mState != STARTED && mState != FLUSHING &&
                  mState != FLUSHED)
-                    || (mFlags & kFlagStickyError)
                     || format == NULL) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
+            } else if (mFlags & kFlagStickyError) {
+                PostReplyWithError(replyID, getStickyError());
+                break;
             }
 
             sp<AMessage> response = new AMessage;
@@ -1687,6 +1728,7 @@
         mFlags &= ~kFlagIsEncoder;
         mFlags &= ~kFlagGatherCodecSpecificData;
         mFlags &= ~kFlagIsAsync;
+        mStickyError = OK;
 
         mActivityNotify.clear();
         mCallback.clear();
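
With the factory changes above, creation failures are no longer reported as a bare NULL. A hedged usage sketch, assuming "looper" is a started ALooper and using a hypothetical helper name:

// Sketch only: the out-parameter carries the init() result when creation fails.
sp<MediaCodec> createAvcDecoderOrLogWhy(const sp<ALooper> &looper) {
    status_t err = OK;
    sp<MediaCodec> codec = MediaCodec::CreateByType(
            looper, "video/avc", false /* encoder */, &err);
    if (codec == NULL) {
        // err now explains the failure instead of being lost.
        ALOGE("failed to create a video/avc decoder (err %d)", err);
    }
    return codec;
}
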
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index d021533..7f8b7f5 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -18,13 +18,19 @@
 #define LOG_TAG "MediaCodecList"
 #include <utils/Log.h>
 
-#include <media/stagefright/MediaCodecList.h>
+#include <binder/IServiceManager.h>
+
+#include <media/IMediaCodecList.h>
+#include <media/IMediaPlayerService.h>
+#include <media/MediaCodecInfo.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/OMXCodec.h>
+
 #include <utils/threads.h>
 
 #include <libexpat/expat.h>
@@ -33,18 +39,47 @@
 
 static Mutex sInitMutex;
 
-// static
-MediaCodecList *MediaCodecList::sCodecList;
+static MediaCodecList *gCodecList = NULL;
 
 // static
-const MediaCodecList *MediaCodecList::getInstance() {
+sp<IMediaCodecList> MediaCodecList::sCodecList;
+
+// static
+sp<IMediaCodecList> MediaCodecList::getLocalInstance() {
     Mutex::Autolock autoLock(sInitMutex);
 
-    if (sCodecList == NULL) {
-        sCodecList = new MediaCodecList;
+    if (gCodecList == NULL) {
+        gCodecList = new MediaCodecList;
+        if (gCodecList->initCheck() == OK) {
+            sCodecList = gCodecList;
+        }
     }
 
-    return sCodecList->initCheck() == OK ? sCodecList : NULL;
+    return sCodecList;
+}
+
+static Mutex sRemoteInitMutex;
+
+sp<IMediaCodecList> MediaCodecList::sRemoteList;
+
+// static
+sp<IMediaCodecList> MediaCodecList::getInstance() {
+    Mutex::Autolock _l(sRemoteInitMutex);
+    if (sRemoteList == NULL) {
+        sp<IBinder> binder =
+            defaultServiceManager()->getService(String16("media.player"));
+        sp<IMediaPlayerService> service =
+            interface_cast<IMediaPlayerService>(binder);
+        if (service.get() != NULL) {
+            sRemoteList = service->getCodecList();
+        }
+
+        if (sRemoteList == NULL) {
+            // if failed to get remote list, create local list
+            sRemoteList = getLocalInstance();
+        }
+    }
+    return sRemoteList;
 }
 
 MediaCodecList::MediaCodecList()
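
Clients of getInstance() now receive an IMediaCodecList, remote when the media.player service is reachable and local otherwise. A hedged sketch of a caller, using only the accessors that appear elsewhere in this change (findCodecByName, getCodecInfo, getSupportedMimes); the component name and helper name are examples:

// Sketch only: query the codec list for one component's supported mime types.
void logSupportedMimesFor(const char *componentName) {
    sp<IMediaCodecList> list = MediaCodecList::getInstance();
    if (list == NULL) {
        return;
    }
    ssize_t index = list->findCodecByName(componentName);
    if (index < 0) {
        return;
    }
    const sp<MediaCodecInfo> info = list->getCodecInfo(index);
    Vector<AString> mimes;
    info->getSupportedMimes(&mimes);
    for (size_t i = 0; i < mimes.size(); ++i) {
        ALOGV("%s supports %s", componentName, mimes[i].c_str());
    }
}
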
@@ -59,37 +94,69 @@
         mHrefBase = AString(codecs_xml, href_base_end - codecs_xml + 1);
     }
 
-    mInitCheck = OK;
+    mInitCheck = OK; // keeping this here for safety
     mCurrentSection = SECTION_TOPLEVEL;
     mDepth = 0;
 
+    OMXClient client;
+    mInitCheck = client.connect();
+    if (mInitCheck != OK) {
+        return;
+    }
+    mOMX = client.interface();
     parseXMLFile(codecs_xml);
+    mOMX.clear();
 
     if (mInitCheck != OK) {
         mCodecInfos.clear();
-        mCodecQuirks.clear();
         return;
     }
 
     for (size_t i = mCodecInfos.size(); i-- > 0;) {
-        CodecInfo *info = &mCodecInfos.editItemAt(i);
+        const MediaCodecInfo &info = *mCodecInfos.itemAt(i).get();
 
-        if (info->mTypes == 0) {
+        if (info.mCaps.size() == 0) {
             // No types supported by this component???
             ALOGW("Component %s does not support any type of media?",
-                  info->mName.c_str());
+                  info.mName.c_str());
 
             mCodecInfos.removeAt(i);
 #if LOG_NDEBUG == 0
         } else {
-            for (size_t type_ix = 0; type_ix < mTypes.size(); ++type_ix) {
-                uint32_t typeMask = 1ul << mTypes.valueAt(type_ix);
-                if (info->mTypes & typeMask) {
-                    AString mime = mTypes.keyAt(type_ix);
-                    uint32_t bit = mTypes.valueAt(type_ix);
+            for (size_t type_ix = 0; type_ix < info.mCaps.size(); ++type_ix) {
+                AString mime = info.mCaps.keyAt(type_ix);
+                const sp<MediaCodecInfo::Capabilities> &caps = info.mCaps.valueAt(type_ix);
 
-                    ALOGV("%s codec info for %s: %s", info->mName.c_str(), mime.c_str(),
-                            info->mCaps.editValueFor(bit)->debugString().c_str());
+                ALOGV("%s codec info for %s: %s", info.mName.c_str(), mime.c_str(),
+                        caps->getDetails()->debugString().c_str());
+                ALOGV("    flags=%d", caps->getFlags());
+                {
+                    Vector<uint32_t> colorFormats;
+                    caps->getSupportedColorFormats(&colorFormats);
+                    AString nice;
+                    for (size_t ix = 0; ix < colorFormats.size(); ix++) {
+                        if (ix > 0) {
+                            nice.append(", ");
+                        }
+                        nice.append(colorFormats.itemAt(ix));
+                    }
+                    ALOGV("    colors=[%s]", nice.c_str());
+                }
+                {
+                    Vector<MediaCodecInfo::ProfileLevel> profileLevels;
+                    caps->getSupportedProfileLevels(&profileLevels);
+                    AString nice;
+                    for (size_t ix = 0; ix < profileLevels.size(); ix++) {
+                        if (ix > 0) {
+                            nice.append(", ");
+                        }
+                        const MediaCodecInfo::ProfileLevel &pl =
+                            profileLevels.itemAt(ix);
+                        nice.append(pl.mProfile);
+                        nice.append("/");
+                        nice.append(pl.mLevel);
+                    }
+                    ALOGV("    levels=[%s]", nice.c_str());
                 }
             }
 #endif
@@ -294,9 +361,8 @@
         case SECTION_DECODER_TYPE:
         case SECTION_ENCODER_TYPE:
         {
-            CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
             // ignore limits and features specified outside of type
-            bool outside = !inType && info->mSoleType == 0;
+            bool outside = !inType && !mCurrentInfo->mHasSoleMime;
             if (outside && (!strcmp(name, "Limit") || !strcmp(name, "Feature"))) {
                 ALOGW("ignoring %s specified outside of a Type", name);
             } else if (!strcmp(name, "Limit")) {
@@ -344,8 +410,7 @@
                     (mCurrentSection == SECTION_DECODER_TYPE
                             ? SECTION_DECODER : SECTION_ENCODER);
 
-                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
-                info->mCurrentCaps = NULL;
+                mCurrentInfo->complete();
             }
             break;
         }
@@ -354,9 +419,8 @@
         {
             if (!strcmp(name, "MediaCodec")) {
                 mCurrentSection = SECTION_DECODERS;
-
-                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
-                info->mCurrentCaps = NULL;
+                mCurrentInfo->complete();
+                mCurrentInfo = NULL;
             }
             break;
         }
@@ -365,9 +429,8 @@
         {
             if (!strcmp(name, "MediaCodec")) {
                 mCurrentSection = SECTION_ENCODERS;
-
-                CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
-                info->mCurrentCaps = NULL;
+                mCurrentInfo->complete();
+                mCurrentInfo = NULL;
             }
             break;
         }
@@ -418,28 +481,27 @@
         return -EINVAL;
     }
 
-    addMediaCodec(encoder, name, type);
-
-    return OK;
+    mCurrentInfo = new MediaCodecInfo(name, encoder, type);
+    mCodecInfos.push_back(mCurrentInfo);
+    return initializeCapabilities(type);
 }
 
-void MediaCodecList::addMediaCodec(
-        bool encoder, const char *name, const char *type) {
-    mCodecInfos.push();
-    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
-    info->mName = name;
-    info->mIsEncoder = encoder;
-    info->mSoleType = 0;
-    info->mTypes = 0;
-    info->mQuirks = 0;
-    info->mCurrentCaps = NULL;
+status_t MediaCodecList::initializeCapabilities(const char *type) {
+    ALOGV("initializeCapabilities %s:%s",
+            mCurrentInfo->mName.c_str(), type);
 
-    if (type != NULL) {
-        addType(type);
-        // if type was specified in attributes, we do not allow
-        // subsequent types
-        info->mSoleType = info->mTypes;
+    CodecCapabilities caps;
+    status_t err = QueryCodec(
+            mOMX,
+            mCurrentInfo->mName.c_str(),
+            type,
+            mCurrentInfo->mIsEncoder,
+            &caps);
+    if (err != OK) {
+        return err;
     }
+
+    return mCurrentInfo->initializeCapabilities(caps);
 }
 
 status_t MediaCodecList::addQuirk(const char **attrs) {
@@ -464,36 +526,13 @@
         return -EINVAL;
     }
 
-    uint32_t bit;
-    ssize_t index = mCodecQuirks.indexOfKey(name);
-    if (index < 0) {
-        bit = mCodecQuirks.size();
-
-        if (bit == 32) {
-            ALOGW("Too many distinct quirk names in configuration.");
-            return OK;
-        }
-
-        mCodecQuirks.add(name, bit);
-    } else {
-        bit = mCodecQuirks.valueAt(index);
-    }
-
-    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
-    info->mQuirks |= 1ul << bit;
-
+    mCurrentInfo->addQuirk(name);
     return OK;
 }
 
 status_t MediaCodecList::addTypeFromAttributes(const char **attrs) {
     const char *name = NULL;
 
-    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
-    if (info->mSoleType != 0) {
-        ALOGE("Codec '%s' already had its type specified", info->mName.c_str());
-        return -EINVAL;
-    }
-
     size_t i = 0;
     while (attrs[i] != NULL) {
         if (!strcmp(attrs[i], "name")) {
@@ -513,54 +552,47 @@
         return -EINVAL;
     }
 
-    addType(name);
-
-    return OK;
+    status_t ret = mCurrentInfo->addMime(name);
+    if (ret == OK) {
+        ret = initializeCapabilities(name);
+    }
+    return ret;
 }
 
-void MediaCodecList::addType(const char *name) {
-    uint32_t bit;
-    ssize_t index = mTypes.indexOfKey(name);
-    if (index < 0) {
-        bit = mTypes.size();
-
-        if (bit == 32) {
-            ALOGW("Too many distinct type names in configuration.");
-            return;
-        }
-
-        mTypes.add(name, bit);
-    } else {
-        bit = mTypes.valueAt(index);
-    }
-
-    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
-    info->mTypes |= 1ul << bit;
-    if (info->mCaps.indexOfKey(bit) < 0) {
-        AMessage *msg = new AMessage();
-        info->mCaps.add(bit, msg);
-    }
-    info->mCurrentCaps = info->mCaps.editValueFor(bit);
-}
-
+// legacy lookup method; skips codecs that require advanced features
 ssize_t MediaCodecList::findCodecByType(
         const char *type, bool encoder, size_t startIndex) const {
-    ssize_t typeIndex = mTypes.indexOfKey(type);
+    static const char *advancedFeatures[] = {
+        "feature-secure-playback",
+        "feature-tunneled-playback",
+    };
 
-    if (typeIndex < 0) {
-        return -ENOENT;
-    }
+    size_t numCodecs = mCodecInfos.size();
+    for (; startIndex < numCodecs; ++startIndex) {
+        const MediaCodecInfo &info = *mCodecInfos.itemAt(startIndex).get();
 
-    uint32_t typeMask = 1ul << mTypes.valueAt(typeIndex);
+        if (info.isEncoder() != encoder) {
+            continue;
+        }
+        sp<MediaCodecInfo::Capabilities> capabilities = info.getCapabilitiesFor(type);
+        if (capabilities == NULL) {
+            continue;
+        }
+        const sp<AMessage> &details = capabilities->getDetails();
 
-    while (startIndex < mCodecInfos.size()) {
-        const CodecInfo &info = mCodecInfos.itemAt(startIndex);
-
-        if (info.mIsEncoder == encoder && (info.mTypes & typeMask)) {
-            return startIndex;
+        int32_t required;
+        bool isAdvanced = false;
+        for (size_t ix = 0; ix < ARRAY_SIZE(advancedFeatures); ix++) {
+            if (details->findInt32(advancedFeatures[ix], &required) &&
+                    required != 0) {
+                isAdvanced = true;
+                break;
+            }
         }
 
-        ++startIndex;
+        if (!isAdvanced) {
+            return startIndex;
+        }
     }
 
     return -ENOENT;
@@ -616,12 +648,11 @@
         return -EINVAL;
     }
 
-    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
-
     // size, blocks, bitrate, frame-rate, blocks-per-second, aspect-ratio: range
     // quality: range + default + [scale]
     // complexity: range + default
     bool found;
+
     if (name == "aspect-ratio" || name == "bitrate" || name == "block-count"
             || name == "blocks-per-second" || name == "complexity"
             || name == "frame-rate" || name == "quality" || name == "size") {
@@ -672,16 +703,16 @@
             name = in_;
         }
         if (name == "quality") {
-            info->mCurrentCaps->setString("quality-scale", scale);
+            mCurrentInfo->addDetail("quality-scale", scale);
         }
         if (name == "quality" || name == "complexity") {
             AString tag = name;
             tag.append("-default");
-            info->mCurrentCaps->setString(tag.c_str(), def);
+            mCurrentInfo->addDetail(tag, def);
         }
         AString tag = name;
         tag.append("-range");
-        info->mCurrentCaps->setString(tag.c_str(), range);
+        mCurrentInfo->addDetail(tag, range);
     } else {
         AString max, value, ranges;
         if (msg->contains("default")) {
@@ -708,13 +739,13 @@
         if (max.size()) {
             AString tag = "max-";
             tag.append(name);
-            info->mCurrentCaps->setString(tag.c_str(), max);
+            mCurrentInfo->addDetail(tag, max);
         } else if (value.size()) {
-            info->mCurrentCaps->setString(name.c_str(), value);
+            mCurrentInfo->addDetail(name, value);
         } else if (ranges.size()) {
             AString tag = name;
             tag.append("-ranges");
-            info->mCurrentCaps->setString(tag.c_str(), ranges);
+            mCurrentInfo->addDetail(tag, ranges);
         } else {
             ALOGW("Ignoring unrecognized limit '%s'", name.c_str());
         }
@@ -769,16 +800,13 @@
         return -EINVAL;
     }
 
-    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
-    AString tag = "feature-";
-    tag.append(name);
-    info->mCurrentCaps->setInt32(tag.c_str(), (required == 1) || (optional == 0));
+    mCurrentInfo->addFeature(name, (required == 1) || (optional == 0));
     return OK;
 }
 
 ssize_t MediaCodecList::findCodecByName(const char *name) const {
     for (size_t i = 0; i < mCodecInfos.size(); ++i) {
-        const CodecInfo &info = mCodecInfos.itemAt(i);
+        const MediaCodecInfo &info = *mCodecInfos.itemAt(i).get();
 
         if (info.mName == name) {
             return i;
@@ -792,121 +820,4 @@
     return mCodecInfos.size();
 }
 
-const char *MediaCodecList::getCodecName(size_t index) const {
-    if (index >= mCodecInfos.size()) {
-        return NULL;
-    }
-
-    const CodecInfo &info = mCodecInfos.itemAt(index);
-    return info.mName.c_str();
-}
-
-bool MediaCodecList::isEncoder(size_t index) const {
-    if (index >= mCodecInfos.size()) {
-        return false;
-    }
-
-    const CodecInfo &info = mCodecInfos.itemAt(index);
-    return info.mIsEncoder;
-}
-
-bool MediaCodecList::codecHasQuirk(
-        size_t index, const char *quirkName) const {
-    if (index >= mCodecInfos.size()) {
-        return false;
-    }
-
-    const CodecInfo &info = mCodecInfos.itemAt(index);
-
-    if (info.mQuirks != 0) {
-        ssize_t index = mCodecQuirks.indexOfKey(quirkName);
-        if (index >= 0 && info.mQuirks & (1ul << mCodecQuirks.valueAt(index))) {
-            return true;
-        }
-    }
-
-    return false;
-}
-
-status_t MediaCodecList::getSupportedTypes(
-        size_t index, Vector<AString> *types) const {
-    types->clear();
-
-    if (index >= mCodecInfos.size()) {
-        return -ERANGE;
-    }
-
-    const CodecInfo &info = mCodecInfos.itemAt(index);
-
-    for (size_t i = 0; i < mTypes.size(); ++i) {
-        uint32_t typeMask = 1ul << mTypes.valueAt(i);
-
-        if (info.mTypes & typeMask) {
-            types->push(mTypes.keyAt(i));
-        }
-    }
-
-    return OK;
-}
-
-status_t MediaCodecList::getCodecCapabilities(
-        size_t index, const char *type,
-        Vector<ProfileLevel> *profileLevels,
-        Vector<uint32_t> *colorFormats,
-        uint32_t *flags,
-        sp<AMessage> *capabilities) const {
-    profileLevels->clear();
-    colorFormats->clear();
-
-    if (index >= mCodecInfos.size()) {
-        return -ERANGE;
-    }
-
-    const CodecInfo &info = mCodecInfos.itemAt(index);
-
-    ssize_t typeIndex = mTypes.indexOfKey(type);
-    if (typeIndex < 0) {
-        return -EINVAL;
-    }
-    // essentially doing valueFor without the CHECK abort
-    typeIndex = mTypes.valueAt(typeIndex);
-
-    OMXClient client;
-    status_t err = client.connect();
-    if (err != OK) {
-        return err;
-    }
-
-    CodecCapabilities caps;
-    err = QueryCodec(
-            client.interface(),
-            info.mName.c_str(), type, info.mIsEncoder, &caps);
-
-    if (err != OK) {
-        return err;
-    }
-
-    for (size_t i = 0; i < caps.mProfileLevels.size(); ++i) {
-        const CodecProfileLevel &src = caps.mProfileLevels.itemAt(i);
-
-        ProfileLevel profileLevel;
-        profileLevel.mProfile = src.mProfile;
-        profileLevel.mLevel = src.mLevel;
-        profileLevels->push(profileLevel);
-    }
-
-    for (size_t i = 0; i < caps.mColorFormats.size(); ++i) {
-        colorFormats->push(caps.mColorFormats.itemAt(i));
-    }
-
-    *flags = caps.mFlags;
-
-    // TODO this check will be removed once JNI side is merged
-    if (capabilities != NULL) {
-        *capabilities = info.mCaps.valueFor(typeIndex);
-    }
-
-    return OK;
-}
-
 }  // namespace android
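As a rough standalone illustration of the advanced-feature filter that the reworked findCodecByType() applies, the sketch below reimplements the same check with std:: containers. FakeCodecInfo, requiresAdvancedFeature() and the detail map are illustrative stand-ins for the stagefright MediaCodecInfo/AMessage types, not part of this patch.

    // Sketch: skip codecs whose capabilities require "advanced" features.
    #include <map>
    #include <string>
    #include <vector>
    #include <iostream>

    struct FakeCodecInfo {
        std::string name;
        bool encoder;
        // per-MIME detail keys, e.g. "feature-secure-playback" -> 1
        std::map<std::string, std::map<std::string, int>> detailsByMime;
    };

    static bool requiresAdvancedFeature(const std::map<std::string, int> &details) {
        static const char *advancedFeatures[] = {
            "feature-secure-playback",
            "feature-tunneled-playback",
        };
        for (const char *feature : advancedFeatures) {
            auto it = details.find(feature);
            if (it != details.end() && it->second != 0) {
                return true;
            }
        }
        return false;
    }

    // Returns the index of the first non-advanced codec supporting `mime`,
    // starting at `startIndex`, or -1 if none is found.
    static int findCodecByType(const std::vector<FakeCodecInfo> &codecs,
                               const std::string &mime, bool encoder,
                               size_t startIndex = 0) {
        for (size_t i = startIndex; i < codecs.size(); ++i) {
            const FakeCodecInfo &info = codecs[i];
            if (info.encoder != encoder) continue;
            auto caps = info.detailsByMime.find(mime);
            if (caps == info.detailsByMime.end()) continue;
            if (requiresAdvancedFeature(caps->second)) continue;
            return static_cast<int>(i);
        }
        return -1;
    }

    int main() {
        std::vector<FakeCodecInfo> codecs = {
            {"OMX.vendor.avc.secure", false,
             {{"video/avc", {{"feature-secure-playback", 1}}}}},
            {"OMX.vendor.avc", false, {{"video/avc", {}}}},
        };
        // The secure decoder is skipped; the plain one at index 1 is returned.
        std::cout << findCodecByType(codecs, "video/avc", false) << "\n";
        return 0;
    }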
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 9868ecf..1a80dcc 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -54,7 +54,7 @@
     Puller(const sp<MediaSource> &source);
 
     status_t start(const sp<MetaData> &meta, const sp<AMessage> &notify);
-    void stopAsync();
+    void stop();
 
     void pause();
     void resume();
@@ -139,8 +139,17 @@
     return postSynchronouslyAndReturnError(msg);
 }
 
-void MediaCodecSource::Puller::stopAsync() {
-    ALOGV("puller (%s) stopAsync", mIsAudio ? "audio" : "video");
+void MediaCodecSource::Puller::stop() {
+    // Stop the source from the caller's thread instead of the puller's looper.
+    // mSource->stop() is thread-safe; doing it outside the puller's looper
+    // lets us stop even if the source gets stuck. If the source gets stuck
+    // in read(), the looper would never get to process the stop(), which
+    // could lead to an ANR.
+
+    ALOGV("source (%s) stopping", mIsAudio ? "audio" : "video");
+    mSource->stop();
+    ALOGV("source (%s) stopped", mIsAudio ? "audio" : "video");
+
     (new AMessage(kWhatStop, id()))->post();
 }
 
@@ -194,9 +203,6 @@
 
         case kWhatStop:
         {
-            ALOGV("source (%s) stopping", mIsAudio ? "audio" : "video");
-            mSource->stop();
-            ALOGV("source (%s) stopped", mIsAudio ? "audio" : "video");
             ++mPullGeneration;
 
             handleEOS();
@@ -283,7 +289,21 @@
 
 status_t MediaCodecSource::stop() {
     sp<AMessage> msg = new AMessage(kWhatStop, mReflector->id());
-    return postSynchronouslyAndReturnError(msg);
+    status_t err = postSynchronouslyAndReturnError(msg);
+
+    // mPuller->stop() needs to be done outside MediaCodecSource's looper,
+    // as it contains a synchronous call to stop the underlying MediaSource.
+    // That call often waits for all outstanding MediaBuffers to return, but
+    // MediaBuffers are only returned when MediaCodecSource's looper gets
+    // to process them.
+
+    if (mPuller != NULL) {
+        ALOGI("puller (%s) stopping", mIsVideo ? "video" : "audio");
+        mPuller->stop();
+        ALOGI("puller (%s) stopped", mIsVideo ? "video" : "audio");
+    }
+
+    return err;
 }
 
 status_t MediaCodecSource::pause() {
@@ -301,10 +321,10 @@
     Mutex::Autolock autolock(mOutputBufferLock);
 
     *buffer = NULL;
-    while (mOutputBufferQueue.size() == 0 && !mEncodedReachedEOS) {
+    while (mOutputBufferQueue.size() == 0 && !mEncoderReachedEOS) {
         mOutputBufferCond.wait(mOutputBufferLock);
     }
-    if (!mEncodedReachedEOS) {
+    if (!mEncoderReachedEOS) {
         *buffer = *mOutputBufferQueue.begin();
         mOutputBufferQueue.erase(mOutputBufferQueue.begin());
         return OK;
@@ -330,9 +350,8 @@
       mStarted(false),
       mStopping(false),
       mDoMoreWorkPending(false),
-      mPullerReachedEOS(false),
       mFirstSampleTimeUs(-1ll),
-      mEncodedReachedEOS(false),
+      mEncoderReachedEOS(false),
       mErrorCode(OK) {
     CHECK(mLooper != NULL);
 
@@ -434,7 +453,7 @@
         return err;
     }
 
-    mEncodedReachedEOS = false;
+    mEncoderReachedEOS = false;
     mErrorCode = OK;
 
     return OK;
@@ -465,10 +484,6 @@
     mEncoderOutputBuffers.clear();
 }
 
-bool MediaCodecSource::reachedEOS() {
-    return mEncodedReachedEOS && ((mPuller == NULL) || mPullerReachedEOS);
-}
-
 status_t MediaCodecSource::postSynchronouslyAndReturnError(
         const sp<AMessage> &msg) {
     sp<AMessage> response;
@@ -486,8 +501,8 @@
 }
 
 void MediaCodecSource::signalEOS(status_t err) {
-    if (!mEncodedReachedEOS) {
-        ALOGI("encoder (%s) reached EOS", mIsVideo ? "video" : "audio");
+    if (!mEncoderReachedEOS) {
+        ALOGV("encoder (%s) reached EOS", mIsVideo ? "video" : "audio");
         {
             Mutex::Autolock autoLock(mOutputBufferLock);
             // release all unread media buffers
@@ -496,16 +511,15 @@
                 (*it)->release();
             }
             mOutputBufferQueue.clear();
-            mEncodedReachedEOS = true;
+            mEncoderReachedEOS = true;
             mErrorCode = err;
             mOutputBufferCond.signal();
         }
 
         releaseEncoder();
     }
-    if (mStopping && reachedEOS()) {
-        ALOGI("MediaCodecSource (%s) fully stopped",
-                mIsVideo ? "video" : "audio");
+    if (mStopping && mEncoderReachedEOS) {
+        ALOGI("encoder (%s) stopped", mIsVideo ? "video" : "audio");
         // posting reply to everyone that's waiting
         List<uint32_t>::iterator it;
         for (it = mStopReplyIDQueue.begin();
@@ -755,7 +769,6 @@
                 kWhatPullerNotify, mReflector->id());
         err = mPuller->start(params, notify);
         if (err != OK) {
-            mPullerReachedEOS = true;
             return err;
         }
     }
@@ -774,9 +787,9 @@
         CHECK(msg->findPointer("accessUnit", (void**)&mbuf));
 
         if (mbuf == NULL) {
-            ALOGI("puller (%s) reached EOS",
+            ALOGV("puller (%s) reached EOS",
                     mIsVideo ? "video" : "audio");
-            mPullerReachedEOS = true;
+            signalEOS();
         }
 
         if (mEncoder == NULL) {
@@ -785,9 +798,8 @@
 
             if (mbuf != NULL) {
                 mbuf->release();
-            } else {
-                signalEOS();
             }
+
             break;
         }
 
@@ -833,14 +845,14 @@
     }
     case kWhatStop:
     {
-        ALOGI("MediaCodecSource (%s) stopping", mIsVideo ? "video" : "audio");
+        ALOGI("encoder (%s) stopping", mIsVideo ? "video" : "audio");
 
         uint32_t replyID;
         CHECK(msg->senderAwaitsResponse(&replyID));
 
-        if (reachedEOS()) {
+        if (mEncoderReachedEOS) {
             // if we already reached EOS, reply and return now
-            ALOGI("MediaCodecSource (%s) already stopped",
+            ALOGI("encoder (%s) already stopped",
                     mIsVideo ? "video" : "audio");
             (new AMessage)->postReply(replyID);
             break;
@@ -860,8 +872,6 @@
         if (mFlags & FLAG_USE_SURFACE_INPUT) {
             mEncoder->signalEndOfInputStream();
         } else {
-            CHECK(mPuller != NULL);
-            mPuller->stopAsync();
             signalEOS();
         }
         break;
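The comments above describe stopping a possibly-stuck source from the caller's thread rather than through the worker's message loop. Below is a minimal sketch of that pattern using plain std::thread; BlockingSource and the lambda "looper" are illustrative stand-ins for MediaSource and the puller's looper, not stagefright classes.

    #include <atomic>
    #include <chrono>
    #include <iostream>
    #include <thread>

    struct BlockingSource {
        std::atomic<bool> stopped{false};
        // read() blocks until stop() is called (stands in for MediaSource::read()).
        void read() {
            while (!stopped) std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
        // stop() is thread-safe and may be called from any thread.
        void stop() { stopped = true; }
    };

    int main() {
        BlockingSource source;
        std::thread looper([&] { source.read(); });  // worker stuck in read()

        // Stopping from the caller's thread unblocks the worker even though
        // the worker itself could never get around to processing a "stop"
        // message while blocked in read().
        source.stop();
        looper.join();
        std::cout << "stopped cleanly\n";
        return 0;
    }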
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 354712c..3d1d40e 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -35,6 +35,7 @@
 #include <HardwareAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/IMediaPlayerService.h>
+#include <media/stagefright/ACodec.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaBufferGroup.h>
 #include <media/stagefright/MediaDefs.h>
@@ -197,7 +198,7 @@
         Vector<CodecNameAndQuirks> *matchingCodecs) {
     matchingCodecs->clear();
 
-    const MediaCodecList *list = MediaCodecList::getInstance();
+    const sp<IMediaCodecList> list = MediaCodecList::getInstance();
     if (list == NULL) {
         return;
     }
@@ -213,7 +214,9 @@
 
         index = matchIndex + 1;
 
-        const char *componentName = list->getCodecName(matchIndex);
+        const sp<MediaCodecInfo> info = list->getCodecInfo(matchIndex);
+        CHECK(info != NULL);
+        const char *componentName = info->getCodecName();
 
         // If a specific codec is requested, skip the non-matching ones.
         if (matchComponentName && strcmp(componentName, matchComponentName)) {
@@ -231,7 +234,7 @@
             ssize_t index = matchingCodecs->add();
             CodecNameAndQuirks *entry = &matchingCodecs->editItemAt(index);
             entry->mName = String8(componentName);
-            entry->mQuirks = getComponentQuirks(list, matchIndex);
+            entry->mQuirks = getComponentQuirks(info);
 
             ALOGV("matching '%s' quirks 0x%08x",
                   entry->mName.string(), entry->mQuirks);
@@ -245,18 +248,15 @@
 
 // static
 uint32_t OMXCodec::getComponentQuirks(
-        const MediaCodecList *list, size_t index) {
+        const sp<MediaCodecInfo> &info) {
     uint32_t quirks = 0;
-    if (list->codecHasQuirk(
-                index, "requires-allocate-on-input-ports")) {
+    if (info->hasQuirk("requires-allocate-on-input-ports")) {
         quirks |= kRequiresAllocateBufferOnInputPorts;
     }
-    if (list->codecHasQuirk(
-                index, "requires-allocate-on-output-ports")) {
+    if (info->hasQuirk("requires-allocate-on-output-ports")) {
         quirks |= kRequiresAllocateBufferOnOutputPorts;
     }
-    if (list->codecHasQuirk(
-                index, "output-buffers-are-unreadable")) {
+    if (info->hasQuirk("output-buffers-are-unreadable")) {
         quirks |= kOutputBuffersAreUnreadable;
     }
 
@@ -265,8 +265,7 @@
 
 // static
 bool OMXCodec::findCodecQuirks(const char *componentName, uint32_t *quirks) {
-    const MediaCodecList *list = MediaCodecList::getInstance();
-
+    const sp<IMediaCodecList> list = MediaCodecList::getInstance();
     if (list == NULL) {
         return false;
     }
@@ -277,7 +276,9 @@
         return false;
     }
 
-    *quirks = getComponentQuirks(list, index);
+    const sp<MediaCodecInfo> info = list->getCodecInfo(index);
+    CHECK(info != NULL);
+    *quirks = getComponentQuirks(info);
 
     return true;
 }
@@ -1551,7 +1552,7 @@
     status_t err = mOMX->freeNode(mNode);
     CHECK_EQ(err, (status_t)OK);
 
-    mNode = NULL;
+    mNode = 0;
     setState(DEAD);
 
     clearCodecSpecificData();
@@ -4746,6 +4747,8 @@
     }
 
     // Color format query
+    // Return color formats in the order reported by the OMX component;
+    // prefix standard formats that have a flexible equivalent with that flexible format.
     OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
     InitOMXParams(&portFormat);
     portFormat.nPortIndex = !isEncoder ? 1 : 0;
@@ -4756,6 +4759,21 @@
         if (err != OK) {
             break;
         }
+
+        OMX_U32 flexibleEquivalent;
+        if (ACodec::isFlexibleColorFormat(
+                    omx, node, portFormat.eColorFormat, &flexibleEquivalent)) {
+            bool marked = false;
+            for (size_t i = 0; i < caps->mColorFormats.size(); i++) {
+                if (caps->mColorFormats.itemAt(i) == flexibleEquivalent) {
+                    marked = true;
+                    break;
+                }
+            }
+            if (!marked) {
+                caps->mColorFormats.push(flexibleEquivalent);
+            }
+        }
         caps->mColorFormats.push(portFormat.eColorFormat);
     }
 
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 587e264..5f1d1c6 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -135,6 +135,11 @@
         if (meta->findInt32(kKeyIsADTS, &isADTS)) {
             msg->setInt32("is-adts", true);
         }
+
+        int32_t aacProfile = -1;
+        if (meta->findInt32(kKeyAACAOT, &aacProfile)) {
+            msg->setInt32("aac-profile", aacProfile);
+        }
     }
 
     int32_t maxInputSize;
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index 894f65c..9835ca3 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -20,6 +20,7 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include <binder/Parcel.h>
 #include <utils/String8.h>
 #include "ADebug.h"
 #include "AString.h"
@@ -306,6 +307,14 @@
     return strcmp(mData, other.mData);
 }
 
+int AString::compareIgnoreCase(const AString &other) const {
+    return strcasecmp(mData, other.mData);
+}
+
+bool AString::equalsIgnoreCase(const AString &other) const {
+    return compareIgnoreCase(other) == 0;
+}
+
 void AString::tolower() {
     makeMutable();
 
@@ -342,6 +351,21 @@
     return !strcasecmp(mData + mSize - suffixLen, suffix);
 }
 
+// static
+AString AString::FromParcel(const Parcel &parcel) {
+    size_t size = static_cast<size_t>(parcel.readInt32());
+    return AString(static_cast<const char *>(parcel.readInplace(size)), size);
+}
+
+status_t AString::writeToParcel(Parcel *parcel) const {
+    CHECK_LE(mSize, static_cast<size_t>(INT32_MAX));
+    status_t err = parcel->writeInt32(mSize);
+    if (err == OK) {
+        err = parcel->write(mData, mSize);
+    }
+    return err;
+}
+
 AString StringPrintf(const char *format, ...) {
     va_list ap;
     va_start(ap, format);
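A minimal sketch of the length-prefixed layout that AString::writeToParcel()/FromParcel() use: a 32-bit size followed by the raw bytes, no terminator. std::vector<uint8_t> stands in for android::Parcel here; the real Parcel additionally handles alignment and error propagation.

    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <string>
    #include <vector>

    static void writeString(std::vector<uint8_t> *out, const std::string &s) {
        int32_t size = static_cast<int32_t>(s.size());  // 32-bit length prefix
        const uint8_t *p = reinterpret_cast<const uint8_t *>(&size);
        out->insert(out->end(), p, p + sizeof(size));
        out->insert(out->end(), s.begin(), s.end());     // raw bytes, no terminator
    }

    static std::string readString(const std::vector<uint8_t> &in, size_t *offset) {
        int32_t size;
        std::memcpy(&size, in.data() + *offset, sizeof(size));
        *offset += sizeof(size);
        std::string s(reinterpret_cast<const char *>(in.data() + *offset), size);
        *offset += size;
        return s;
    }

    int main() {
        std::vector<uint8_t> parcel;
        writeString(&parcel, "video/avc");

        size_t offset = 0;
        std::cout << readString(parcel, &offset) << "\n";  // prints "video/avc"
        return 0;
    }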
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 10cdde2..8667a6b 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -57,7 +57,7 @@
       mHTTPService(httpService),
       mInPreparationPhase(true),
       mHTTPDataSource(new MediaHTTP(mHTTPService->makeHTTPConnection())),
-      mPrevBandwidthIndex(-1),
+      mCurBandwidthIndex(-1),
       mStreamMask(0),
       mNewStreamMask(0),
       mSwapMask(0),
@@ -68,13 +68,17 @@
       mReconfigurationInProgress(false),
       mSwitchInProgress(false),
       mDisconnectReplyID(0),
-      mSeekReplyID(0) {
+      mSeekReplyID(0),
+      mFirstTimeUsValid(false),
+      mFirstTimeUs(0),
+      mLastSeekTimeUs(0) {
 
     mStreams[kAudioIndex] = StreamItem("audio");
     mStreams[kVideoIndex] = StreamItem("video");
     mStreams[kSubtitleIndex] = StreamItem("subtitles");
 
     for (size_t i = 0; i < kMaxStreams; ++i) {
+        mDiscontinuities.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
         mPacketSources.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
         mPacketSources2.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
     }
@@ -109,31 +113,65 @@
         return -EWOULDBLOCK;
     }
 
+    status_t finalResult;
+    sp<AnotherPacketSource> discontinuityQueue = mDiscontinuities.valueFor(stream);
+    if (discontinuityQueue->hasBufferAvailable(&finalResult)) {
+        discontinuityQueue->dequeueAccessUnit(accessUnit);
+        // seeking or track switching
+        sp<AMessage> extra;
+        int64_t timeUs;
+        if ((*accessUnit)->meta()->findMessage("extra", &extra)
+                && extra != NULL
+                && extra->findInt64("timeUs", &timeUs)) {
+            // seeking only
+            mLastSeekTimeUs = timeUs;
+            mDiscontinuityOffsetTimesUs.clear();
+            mDiscontinuityAbsStartTimesUs.clear();
+        }
+        return INFO_DISCONTINUITY;
+    }
+
     sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream);
 
-    status_t finalResult;
     if (!packetSource->hasBufferAvailable(&finalResult)) {
         return finalResult == OK ? -EAGAIN : finalResult;
     }
 
+    // wait for counterpart
+    sp<AnotherPacketSource> otherSource;
+    if (stream == STREAMTYPE_AUDIO && (mStreamMask & STREAMTYPE_VIDEO)) {
+        otherSource = mPacketSources.valueFor(STREAMTYPE_VIDEO);
+    } else if (stream == STREAMTYPE_VIDEO && (mStreamMask & STREAMTYPE_AUDIO)) {
+        otherSource = mPacketSources.valueFor(STREAMTYPE_AUDIO);
+    }
+    if (otherSource != NULL && !otherSource->hasBufferAvailable(&finalResult)) {
+        return finalResult == OK ? -EAGAIN : finalResult;
+    }
+
     status_t err = packetSource->dequeueAccessUnit(accessUnit);
 
+    size_t streamIdx;
     const char *streamStr;
     switch (stream) {
         case STREAMTYPE_AUDIO:
+            streamIdx = kAudioIndex;
             streamStr = "audio";
             break;
         case STREAMTYPE_VIDEO:
+            streamIdx = kVideoIndex;
             streamStr = "video";
             break;
         case STREAMTYPE_SUBTITLES:
+            streamIdx = kSubtitleIndex;
             streamStr = "subs";
             break;
         default:
             TRESPASS();
     }
 
+    StreamItem& strm = mStreams[streamIdx];
     if (err == INFO_DISCONTINUITY) {
+        // adaptive streaming, discontinuities in the playlist
         int32_t type;
         CHECK((*accessUnit)->meta()->findInt32("discontinuity", &type));
 
@@ -148,10 +186,7 @@
               extra == NULL ? "NULL" : extra->debugString().c_str());
 
         int32_t swap;
-        if (type == ATSParser::DISCONTINUITY_FORMATCHANGE
-                && (*accessUnit)->meta()->findInt32("swapPacketSource", &swap)
-                && swap) {
-
+        if ((*accessUnit)->meta()->findInt32("swapPacketSource", &swap) && swap) {
             int32_t switchGeneration;
             CHECK((*accessUnit)->meta()->findInt32("switchGeneration", &switchGeneration));
             {
@@ -164,13 +199,67 @@
                     msg->post();
                 }
             }
+        } else {
+            size_t seq = strm.mCurDiscontinuitySeq;
+            int64_t offsetTimeUs;
+            if (mDiscontinuityOffsetTimesUs.indexOfKey(seq) >= 0) {
+                offsetTimeUs = mDiscontinuityOffsetTimesUs.valueFor(seq);
+            } else {
+                offsetTimeUs = 0;
+            }
+
+            seq += 1;
+            if (mDiscontinuityAbsStartTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0) {
+                int64_t firstTimeUs;
+                firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(strm.mCurDiscontinuitySeq);
+                offsetTimeUs += strm.mLastDequeuedTimeUs - firstTimeUs;
+                offsetTimeUs += strm.mLastSampleDurationUs;
+            } else {
+                offsetTimeUs += strm.mLastSampleDurationUs;
+            }
+
+            mDiscontinuityOffsetTimesUs.add(seq, offsetTimeUs);
         }
     } else if (err == OK) {
+
         if (stream == STREAMTYPE_AUDIO || stream == STREAMTYPE_VIDEO) {
             int64_t timeUs;
+            int32_t discontinuitySeq = 0;
             CHECK((*accessUnit)->meta()->findInt64("timeUs",  &timeUs));
-            ALOGV("[%s] read buffer at time %" PRId64 " us", streamStr, timeUs);
+            (*accessUnit)->meta()->findInt32("discontinuitySeq", &discontinuitySeq);
+            strm.mCurDiscontinuitySeq = discontinuitySeq;
 
+            int32_t discard = 0;
+            int64_t firstTimeUs;
+            if (mDiscontinuityAbsStartTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0) {
+                int64_t durUs; // approximate sample duration
+                if (timeUs > strm.mLastDequeuedTimeUs) {
+                    durUs = timeUs - strm.mLastDequeuedTimeUs;
+                } else {
+                    durUs = strm.mLastDequeuedTimeUs - timeUs;
+                }
+                strm.mLastSampleDurationUs = durUs;
+                firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(strm.mCurDiscontinuitySeq);
+            } else if ((*accessUnit)->meta()->findInt32("discard", &discard) && discard) {
+                firstTimeUs = timeUs;
+            } else {
+                mDiscontinuityAbsStartTimesUs.add(strm.mCurDiscontinuitySeq, timeUs);
+                firstTimeUs = timeUs;
+            }
+
+            strm.mLastDequeuedTimeUs = timeUs;
+            if (timeUs >= firstTimeUs) {
+                timeUs -= firstTimeUs;
+            } else {
+                timeUs = 0;
+            }
+            timeUs += mLastSeekTimeUs;
+            if (mDiscontinuityOffsetTimesUs.indexOfKey(discontinuitySeq) >= 0) {
+                timeUs += mDiscontinuityOffsetTimesUs.valueFor(discontinuitySeq);
+            }
+
+            ALOGV("[%s] read buffer at time %" PRId64 " us", streamStr, timeUs);
+            (*accessUnit)->meta()->setInt64("timeUs",  timeUs);
             mLastDequeuedTimeUs = timeUs;
             mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
         } else if (stream == STREAMTYPE_SUBTITLES) {
@@ -289,7 +378,9 @@
                             break;
                         }
 
-                        tryToFinishBandwidthSwitch();
+                        if (mSwitchInProgress) {
+                            tryToFinishBandwidthSwitch();
+                        }
                     }
 
                     if (mContinuation != NULL) {
@@ -538,8 +629,9 @@
         mBandwidthItems.push(item);
     }
 
+    mPlaylist->pickRandomMediaItems();
     changeConfiguration(
-            0ll /* timeUs */, initialBandwidthIndex, true /* pickTrack */);
+            0ll /* timeUs */, initialBandwidthIndex, false /* pickTrack */);
 }
 
 void LiveSession::finishDisconnect() {
@@ -847,20 +939,20 @@
     // to lowest)
     const size_t kMinIndex = 0;
 
-    static ssize_t mPrevBandwidthIndex = -1;
+    static ssize_t mCurBandwidthIndex = -1;
 
     size_t index;
-    if (mPrevBandwidthIndex < 0) {
+    if (mCurBandwidthIndex < 0) {
         index = kMinIndex;
     } else if (uniformRand() < 0.5) {
-        index = (size_t)mPrevBandwidthIndex;
+        index = (size_t)mCurBandwidthIndex;
     } else {
-        index = mPrevBandwidthIndex + 1;
+        index = mCurBandwidthIndex + 1;
         if (index == mBandwidthItems.size()) {
             index = kMinIndex;
         }
     }
-    mPrevBandwidthIndex = index;
+    mCurBandwidthIndex = index;
 #elif 0
     // Pick the highest bandwidth stream below or equal to 1.2 Mbit/sec
 
@@ -937,7 +1029,10 @@
 status_t LiveSession::selectTrack(size_t index, bool select) {
     status_t err = mPlaylist->selectTrack(index, select);
     if (err == OK) {
-        (new AMessage(kWhatChangeConfiguration, id()))->post();
+        sp<AMessage> msg = new AMessage(kWhatChangeConfiguration, id());
+        msg->setInt32("bandwidthIndex", mCurBandwidthIndex);
+        msg->setInt32("pickTrack", select);
+        msg->post();
     }
     return err;
 }
@@ -964,15 +1059,11 @@
     CHECK(!mReconfigurationInProgress);
     mReconfigurationInProgress = true;
 
-    mPrevBandwidthIndex = bandwidthIndex;
+    mCurBandwidthIndex = bandwidthIndex;
 
     ALOGV("changeConfiguration => timeUs:%" PRId64 " us, bwIndex:%zu, pickTrack:%d",
           timeUs, bandwidthIndex, pickTrack);
 
-    if (pickTrack) {
-        mPlaylist->pickRandomMediaItems();
-    }
-
     CHECK_LT(bandwidthIndex, mBandwidthItems.size());
     const BandwidthItem &item = mBandwidthItems.itemAt(bandwidthIndex);
 
@@ -995,14 +1086,15 @@
 
         // If we're seeking all current fetchers are discarded.
         if (timeUs < 0ll) {
-            // delay fetcher removal
-            discardFetcher = false;
+            // delay fetcher removal if not picking tracks
+            discardFetcher = pickTrack;
 
             for (size_t j = 0; j < kMaxStreams; ++j) {
                 StreamType type = indexToType(j);
                 if ((streamMask & type) && uri == URIs[j]) {
                     resumeMask |= type;
                     streamMask &= ~type;
+                    discardFetcher = false;
                 }
             }
         }
@@ -1016,16 +1108,17 @@
 
     sp<AMessage> msg;
     if (timeUs < 0ll) {
-        // skip onChangeConfiguration2 (decoder destruction) if switching.
+        // skip onChangeConfiguration2 (decoder destruction) if not seeking.
         msg = new AMessage(kWhatChangeConfiguration3, id());
     } else {
         msg = new AMessage(kWhatChangeConfiguration2, id());
     }
     msg->setInt32("streamMask", streamMask);
     msg->setInt32("resumeMask", resumeMask);
+    msg->setInt32("pickTrack", pickTrack);
     msg->setInt64("timeUs", timeUs);
     for (size_t i = 0; i < kMaxStreams; ++i) {
-        if (streamMask & indexToType(i)) {
+        if ((streamMask | resumeMask) & indexToType(i)) {
             msg->setString(mStreams[i].uriKey().c_str(), URIs[i].c_str());
         }
     }
@@ -1049,7 +1142,10 @@
 
 void LiveSession::onChangeConfiguration(const sp<AMessage> &msg) {
     if (!mReconfigurationInProgress) {
-        changeConfiguration(-1ll /* timeUs */, getBandwidthIndex());
+        int32_t pickTrack = 0, bandwidthIndex = mCurBandwidthIndex;
+        msg->findInt32("pickTrack", &pickTrack);
+        msg->findInt32("bandwidthIndex", &bandwidthIndex);
+        changeConfiguration(-1ll /* timeUs */, bandwidthIndex, pickTrack);
     } else {
         msg->post(1000000ll); // retry in 1 sec
     }
@@ -1060,8 +1156,14 @@
 
     // All fetchers are either suspended or have been removed now.
 
-    uint32_t streamMask;
+    uint32_t streamMask, resumeMask;
     CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask));
+    CHECK(msg->findInt32("resumeMask", (int32_t *)&resumeMask));
+
+    // currently onChangeConfiguration2 is only called for seeking;
+    // remove the following CHECK if it is used elsewhere.
+    CHECK_EQ(resumeMask, 0);
+    streamMask |= resumeMask;
 
     AString URIs[kMaxStreams];
     for (size_t i = 0; i < kMaxStreams; ++i) {
@@ -1125,16 +1227,21 @@
     }
 
     int64_t timeUs;
+    int32_t pickTrack;
     bool switching = false;
     CHECK(msg->findInt64("timeUs", &timeUs));
+    CHECK(msg->findInt32("pickTrack", &pickTrack));
 
     if (timeUs < 0ll) {
-        timeUs = mLastDequeuedTimeUs;
-        switching = true;
+        if (!pickTrack) {
+            switching = true;
+        }
+        mRealTimeBaseUs = ALooper::GetNowUs() - mLastDequeuedTimeUs;
+    } else {
+        mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
     }
-    mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
 
-    mNewStreamMask = streamMask;
+    mNewStreamMask = streamMask | resumeMask;
 
     // Of all existing fetchers:
     // * Resume fetchers that are still needed and assign them original packet sources.
@@ -1147,6 +1254,16 @@
         for (size_t j = 0; j < kMaxStreams; ++j) {
             if ((resumeMask & indexToType(j)) && uri == mStreams[j].mUri) {
                 sources[j] = mPacketSources.valueFor(indexToType(j));
+
+                if (j != kSubtitleIndex) {
+                    ALOGV("queueing dummy discontinuity for stream type %d", indexToType(j));
+                    sp<AnotherPacketSource> discontinuityQueue;
+                    discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
+                    discontinuityQueue->queueDiscontinuity(
+                            ATSParser::DISCONTINUITY_NONE,
+                            NULL,
+                            true);
+                }
             }
         }
 
@@ -1180,7 +1297,9 @@
         CHECK(fetcher != NULL);
 
         int32_t latestSeq = -1;
-        int64_t latestTimeUs = 0ll;
+        int64_t startTimeUs = -1;
+        int64_t segmentStartTimeUs = -1ll;
+        int32_t discontinuitySeq = -1;
         sp<AnotherPacketSource> sources[kMaxStreams];
 
         // TRICKY: looping from i as earlier streams are already removed from streamMask
@@ -1188,29 +1307,65 @@
             if ((streamMask & indexToType(j)) && uri == mStreams[j].mUri) {
                 sources[j] = mPacketSources.valueFor(indexToType(j));
 
-                if (!switching) {
+                if (timeUs >= 0) {
                     sources[j]->clear();
+                    startTimeUs = timeUs;
+
+                    sp<AnotherPacketSource> discontinuityQueue;
+                    sp<AMessage> extra = new AMessage;
+                    extra->setInt64("timeUs", timeUs);
+                    discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
+                    discontinuityQueue->queueDiscontinuity(
+                            ATSParser::DISCONTINUITY_SEEK, extra, true);
                 } else {
-                    int32_t type, seq;
-                    int64_t srcTimeUs;
-                    sp<AMessage> meta = sources[j]->getLatestMeta();
+                    int32_t type;
+                    int64_t srcSegmentStartTimeUs;
+                    sp<AMessage> meta;
+                    if (pickTrack) {
+                        // selecting
+                        meta = sources[j]->getLatestDequeuedMeta();
+                    } else {
+                        // adapting
+                        meta = sources[j]->getLatestEnqueuedMeta();
+                    }
 
                     if (meta != NULL && !meta->findInt32("discontinuity", &type)) {
-                        CHECK(meta->findInt32("seq", &seq));
-                        if (seq > latestSeq) {
-                            latestSeq = seq;
+                        int64_t tmpUs;
+                        CHECK(meta->findInt64("timeUs", &tmpUs));
+                        if (startTimeUs < 0 || tmpUs < startTimeUs) {
+                            startTimeUs = tmpUs;
                         }
-                        CHECK(meta->findInt64("timeUs", &srcTimeUs));
-                        if (srcTimeUs > latestTimeUs) {
-                            latestTimeUs = srcTimeUs;
+
+                        CHECK(meta->findInt64("segmentStartTimeUs", &tmpUs));
+                        if (segmentStartTimeUs < 0 || tmpUs < segmentStartTimeUs) {
+                            segmentStartTimeUs = tmpUs;
+                        }
+
+                        int32_t seq;
+                        CHECK(meta->findInt32("discontinuitySeq", &seq));
+                        if (discontinuitySeq < 0 || seq < discontinuitySeq) {
+                            discontinuitySeq = seq;
                         }
                     }
 
-                    sources[j] = mPacketSources2.valueFor(indexToType(j));
-                    sources[j]->clear();
-                    uint32_t extraStreams = mNewStreamMask & (~mStreamMask);
-                    if (extraStreams & indexToType(j)) {
-                        sources[j]->queueAccessUnit(createFormatChangeBuffer(/* swap = */ false));
+                    if (pickTrack) {
+                        // selecting track, queue discontinuities before content
+                        sources[j]->clear();
+                        if (j == kSubtitleIndex) {
+                            break;
+                        }
+                        sp<AnotherPacketSource> discontinuityQueue;
+                        discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
+                        discontinuityQueue->queueDiscontinuity(
+                                ATSParser::DISCONTINUITY_FORMATCHANGE, NULL, true);
+                    } else {
+                        // adapting, queue discontinuities after resume
+                        sources[j] = mPacketSources2.valueFor(indexToType(j));
+                        sources[j]->clear();
+                        uint32_t extraStreams = mNewStreamMask & (~mStreamMask);
+                        if (extraStreams & indexToType(j)) {
+                            sources[j]->queueAccessUnit(createFormatChangeBuffer(/*swap*/ false));
+                        }
                     }
                 }
 
@@ -1222,9 +1377,10 @@
                 sources[kAudioIndex],
                 sources[kVideoIndex],
                 sources[kSubtitleIndex],
-                timeUs,
-                latestTimeUs /* min start time(us) */,
-                latestSeq >= 0 ? latestSeq + 1 : -1 /* starting sequence number hint */ );
+                startTimeUs < 0 ? mLastSeekTimeUs : startTimeUs,
+                segmentStartTimeUs,
+                discontinuitySeq,
+                switching);
     }
 
     // All fetchers have now been started, the configuration change
@@ -1236,6 +1392,7 @@
     mReconfigurationInProgress = false;
     if (switching) {
         mSwitchInProgress = true;
+        mSwapMask = streamMask;
     } else {
         mStreamMask = mNewStreamMask;
     }
@@ -1254,8 +1411,8 @@
 
     int32_t stream;
     CHECK(msg->findInt32("stream", &stream));
-    mSwapMask |= stream;
-    if (mSwapMask != mStreamMask) {
+    mSwapMask &= ~stream;
+    if (mSwapMask != 0) {
         return;
     }
 
@@ -1271,9 +1428,12 @@
 }
 
 // Mark switch done when:
-//   1. all old buffers are swapped out, AND
-//   2. all old fetchers are removed.
+//   1. all old buffers are swapped out
 void LiveSession::tryToFinishBandwidthSwitch() {
+    if (!mSwitchInProgress) {
+        return;
+    }
+
     bool needToRemoveFetchers = false;
     for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
         if (mFetcherInfos.valueAt(i).mToBeRemoved) {
@@ -1281,10 +1441,11 @@
             break;
         }
     }
-    if (!needToRemoveFetchers && mSwapMask == mStreamMask) {
+
+    if (!needToRemoveFetchers && mSwapMask == 0) {
+        ALOGI("mSwitchInProgress = false");
         mStreamMask = mNewStreamMask;
         mSwitchInProgress = false;
-        mSwapMask = 0;
     }
 }
 
@@ -1310,13 +1471,13 @@
         return false;
     }
 
-    if (mPrevBandwidthIndex < 0) {
+    if (mCurBandwidthIndex < 0) {
         return true;
     }
 
-    if (bandwidthIndex == (size_t)mPrevBandwidthIndex) {
+    if (bandwidthIndex == (size_t)mCurBandwidthIndex) {
         return false;
-    } else if (bandwidthIndex > (size_t)mPrevBandwidthIndex) {
+    } else if (bandwidthIndex > (size_t)mCurBandwidthIndex) {
         return canSwitchUp();
     } else {
         return true;
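A standalone sketch of the timestamp rebasing performed in dequeueAccessUnit() above: each discontinuity sequence records an absolute start time and a cumulative offset, and every dequeued sample is reported relative to the last seek position. TimelineState and its maps are illustrative simplifications of the mDiscontinuityAbsStartTimesUs/mDiscontinuityOffsetTimesUs bookkeeping.

    #include <cstdint>
    #include <iostream>
    #include <map>

    struct TimelineState {
        int64_t lastSeekTimeUs = 0;
        std::map<int32_t, int64_t> absStartTimesUs;  // discontinuity seq -> first pts seen
        std::map<int32_t, int64_t> offsetTimesUs;    // discontinuity seq -> cumulative offset
    };

    static int64_t rebase(TimelineState *st, int32_t seq, int64_t timeUs) {
        auto it = st->absStartTimesUs.find(seq);
        int64_t firstTimeUs;
        if (it == st->absStartTimesUs.end()) {
            st->absStartTimesUs[seq] = timeUs;  // first sample of this sequence
            firstTimeUs = timeUs;
        } else {
            firstTimeUs = it->second;
        }

        int64_t rebased = timeUs >= firstTimeUs ? timeUs - firstTimeUs : 0;
        rebased += st->lastSeekTimeUs;
        auto off = st->offsetTimesUs.find(seq);
        if (off != st->offsetTimesUs.end()) {
            rebased += off->second;             // shift past earlier sequences
        }
        return rebased;
    }

    int main() {
        TimelineState st;
        st.lastSeekTimeUs = 5'000'000;          // user sought to 5s
        st.offsetTimesUs[1] = 2'000'000;        // sequence 1 starts 2s into the timeline

        std::cout << rebase(&st, 1, 90'000'000) << "\n";  // 7000000
        std::cout << rebase(&st, 1, 90'500'000) << "\n";  // 7500000
        return 0;
    }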
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index ed3818f..5423f0f 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -125,8 +125,19 @@
     struct StreamItem {
         const char *mType;
         AString mUri;
-        StreamItem() : mType("") {}
-        StreamItem(const char *type) : mType(type) {}
+        size_t mCurDiscontinuitySeq;
+        int64_t mLastDequeuedTimeUs;
+        int64_t mLastSampleDurationUs;
+        StreamItem()
+            : mType(""),
+              mCurDiscontinuitySeq(0),
+              mLastDequeuedTimeUs(0),
+              mLastSampleDurationUs(0) {}
+        StreamItem(const char *type)
+            : mType(type),
+              mCurDiscontinuitySeq(0),
+              mLastDequeuedTimeUs(0),
+              mLastSampleDurationUs(0) {}
         AString uriKey() {
             AString key(mType);
             key.append("URI");
@@ -147,7 +158,7 @@
     AString mMasterURL;
 
     Vector<BandwidthItem> mBandwidthItems;
-    ssize_t mPrevBandwidthIndex;
+    ssize_t mCurBandwidthIndex;
 
     sp<M3UParser> mPlaylist;
 
@@ -163,6 +174,7 @@
     // we use this to track reconfiguration progress.
     uint32_t mSwapMask;
 
+    KeyedVector<StreamType, sp<AnotherPacketSource> > mDiscontinuities;
     KeyedVector<StreamType, sp<AnotherPacketSource> > mPacketSources;
     // A second set of packet sources that buffer content for the variant we're switching to.
     KeyedVector<StreamType, sp<AnotherPacketSource> > mPacketSources2;
@@ -187,6 +199,12 @@
     uint32_t mDisconnectReplyID;
     uint32_t mSeekReplyID;
 
+    bool mFirstTimeUsValid;
+    int64_t mFirstTimeUs;
+    int64_t mLastSeekTimeUs;
+    KeyedVector<size_t, int64_t> mDiscontinuityAbsStartTimesUs;
+    KeyedVector<size_t, int64_t> mDiscontinuityOffsetTimesUs;
+
     sp<PlaylistFetcher> addFetcher(const char *uri);
 
     void onConnect(const sp<AMessage> &msg);
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index efd852c..1651dee 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -157,8 +157,8 @@
 }
 
 status_t M3UParser::MediaGroup::selectTrack(size_t index, bool select) {
-    if (mType != TYPE_SUBS) {
-        ALOGE("only select subtitile tracks for now!");
+    if (mType != TYPE_SUBS && mType != TYPE_AUDIO) {
+        ALOGE("only select subtitile/audio tracks for now!");
         return INVALID_OPERATION;
     }
 
@@ -246,6 +246,7 @@
       mIsVariantPlaylist(false),
       mIsComplete(false),
       mIsEvent(false),
+      mDiscontinuitySeq(0),
       mSelectedIndex(-1) {
     mInitCheck = parse(data, size);
 }
@@ -273,6 +274,10 @@
     return mIsEvent;
 }
 
+size_t M3UParser::getDiscontinuitySeq() const {
+    return mDiscontinuitySeq;
+}
+
 sp<AMessage> M3UParser::meta() {
     return mMeta;
 }
@@ -567,6 +572,12 @@
                 }
             } else if (line.startsWith("#EXT-X-MEDIA")) {
                 err = parseMedia(line);
+            } else if (line.startsWith("#EXT-X-DISCONTINUITY-SEQUENCE")) {
+                size_t seq;
+                err = parseDiscontinuitySequence(line, &seq);
+                if (err == OK) {
+                    mDiscontinuitySeq = seq;
+                }
             }
 
             if (err != OK) {
@@ -1110,6 +1121,30 @@
 }
 
 // static
+status_t M3UParser::parseDiscontinuitySequence(const AString &line, size_t *seq) {
+    ssize_t colonPos = line.find(":");
+
+    if (colonPos < 0) {
+        return ERROR_MALFORMED;
+    }
+
+    int32_t x;
+    status_t err = ParseInt32(line.c_str() + colonPos + 1, &x);
+    if (err != OK) {
+        return err;
+    }
+
+    if (x < 0) {
+        return ERROR_MALFORMED;
+    }
+
+    if (seq) {
+        *seq = x;
+    }
+    return OK;
+}
+
+// static
 status_t M3UParser::ParseInt32(const char *s, int32_t *x) {
     char *end;
     long lval = strtol(s, &end, 10);
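A standalone sketch of the tag parsing that parseDiscontinuitySequence() performs: everything after the colon must be a clean non-negative base-10 integer. The helper below uses strtol directly and is an illustrative simplification of the ParseInt32() helper plus the sign check, not the patch's code.

    #include <cerrno>
    #include <cstdlib>
    #include <iostream>
    #include <string>

    static bool parseDiscontinuitySequence(const std::string &line, size_t *seq) {
        size_t colonPos = line.find(':');
        if (colonPos == std::string::npos) {
            return false;                       // malformed: tag without a value
        }

        errno = 0;
        char *end = nullptr;
        long value = std::strtol(line.c_str() + colonPos + 1, &end, 10);
        if (errno != 0 || end == line.c_str() + colonPos + 1 ||
                *end != '\0' || value < 0) {
            return false;                       // not a clean non-negative integer
        }

        *seq = static_cast<size_t>(value);
        return true;
    }

    int main() {
        size_t seq = 0;
        if (parseDiscontinuitySequence("#EXT-X-DISCONTINUITY-SEQUENCE:7", &seq)) {
            std::cout << "discontinuity sequence = " << seq << "\n";  // prints 7
        }
        return 0;
    }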
diff --git a/media/libstagefright/httplive/M3UParser.h b/media/libstagefright/httplive/M3UParser.h
index fe9fb9d..d588afe 100644
--- a/media/libstagefright/httplive/M3UParser.h
+++ b/media/libstagefright/httplive/M3UParser.h
@@ -34,6 +34,7 @@
     bool isVariantPlaylist() const;
     bool isComplete() const;
     bool isEvent() const;
+    size_t getDiscontinuitySeq() const;
 
     sp<AMessage> meta();
 
@@ -66,6 +67,7 @@
     bool mIsVariantPlaylist;
     bool mIsComplete;
     bool mIsEvent;
+    size_t mDiscontinuitySeq;
 
     sp<AMessage> mMeta;
     Vector<Item> mItems;
@@ -94,6 +96,8 @@
 
     status_t parseMedia(const AString &line);
 
+    static status_t parseDiscontinuitySequence(const AString &line, size_t *seq);
+
     static status_t ParseInt32(const char *s, int32_t *x);
     static status_t ParseDouble(const char *s, double *x);
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 10437c9..80cb2d0 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -49,7 +49,7 @@
 // static
 const int64_t PlaylistFetcher::kMinBufferedDurationUs = 10000000ll;
 const int64_t PlaylistFetcher::kMaxMonitorDelayUs = 3000000ll;
-const int32_t PlaylistFetcher::kDownloadBlockSize = 192;
+const int32_t PlaylistFetcher::kDownloadBlockSize = 2048;
 const int32_t PlaylistFetcher::kNumSkipFrames = 10;
 
 PlaylistFetcher::PlaylistFetcher(
@@ -62,19 +62,21 @@
       mURI(uri),
       mStreamTypeMask(0),
       mStartTimeUs(-1ll),
-      mMinStartTimeUs(0ll),
-      mStopParams(NULL),
+      mSegmentStartTimeUs(-1ll),
+      mDiscontinuitySeq(-1ll),
+      mStartTimeUsRelative(false),
       mLastPlaylistFetchTimeUs(-1ll),
       mSeqNumber(-1),
       mNumRetries(0),
       mStartup(true),
+      mAdaptive(false),
       mPrepared(false),
-      mSkipToFirstIDRAfterConnect(false),
       mNextPTSTimeUs(-1ll),
       mMonitorQueueGeneration(0),
       mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY),
       mFirstPTSValid(false),
-      mAbsoluteTimeAnchorUs(0ll) {
+      mAbsoluteTimeAnchorUs(0ll),
+      mVideoBuffer(new AnotherPacketSource(NULL)) {
     memset(mPlaylistHash, 0, sizeof(mPlaylistHash));
     mStartTimeUsNotify->setInt32("what", kWhatStartedAt);
     mStartTimeUsNotify->setInt32("streamMask", 0);
@@ -335,8 +337,9 @@
         const sp<AnotherPacketSource> &videoSource,
         const sp<AnotherPacketSource> &subtitleSource,
         int64_t startTimeUs,
-        int64_t minStartTimeUs,
-        int32_t startSeqNumberHint) {
+        int64_t segmentStartTimeUs,
+        int32_t startDiscontinuitySeq,
+        bool adaptive) {
     sp<AMessage> msg = new AMessage(kWhatStart, id());
 
     uint32_t streamTypeMask = 0ul;
@@ -358,8 +361,9 @@
 
     msg->setInt32("streamTypeMask", streamTypeMask);
     msg->setInt64("startTimeUs", startTimeUs);
-    msg->setInt64("minStartTimeUs", minStartTimeUs);
-    msg->setInt32("startSeqNumberHint", startSeqNumberHint);
+    msg->setInt64("segmentStartTimeUs", segmentStartTimeUs);
+    msg->setInt32("startDiscontinuitySeq", startDiscontinuitySeq);
+    msg->setInt32("adaptive", adaptive);
     msg->post();
 }
 
@@ -367,9 +371,9 @@
     (new AMessage(kWhatPause, id()))->post();
 }
 
-void PlaylistFetcher::stopAsync(bool selfTriggered) {
+void PlaylistFetcher::stopAsync(bool clear) {
     sp<AMessage> msg = new AMessage(kWhatStop, id());
-    msg->setInt32("selfTriggered", selfTriggered);
+    msg->setInt32("clear", clear);
     msg->post();
 }
 
@@ -449,10 +453,13 @@
     CHECK(msg->findInt32("streamTypeMask", (int32_t *)&streamTypeMask));
 
     int64_t startTimeUs;
-    int32_t startSeqNumberHint;
+    int64_t segmentStartTimeUs;
+    int32_t startDiscontinuitySeq;
+    int32_t adaptive;
     CHECK(msg->findInt64("startTimeUs", &startTimeUs));
-    CHECK(msg->findInt64("minStartTimeUs", (int64_t *) &mMinStartTimeUs));
-    CHECK(msg->findInt32("startSeqNumberHint", &startSeqNumberHint));
+    CHECK(msg->findInt64("segmentStartTimeUs", &segmentStartTimeUs));
+    CHECK(msg->findInt32("startDiscontinuitySeq", &startDiscontinuitySeq));
+    CHECK(msg->findInt32("adaptive", &adaptive));
 
     if (streamTypeMask & LiveSession::STREAMTYPE_AUDIO) {
         void *ptr;
@@ -482,16 +489,16 @@
     }
 
     mStreamTypeMask = streamTypeMask;
+
     mStartTimeUs = startTimeUs;
+    mSegmentStartTimeUs = segmentStartTimeUs;
+    mDiscontinuitySeq = startDiscontinuitySeq;
 
     if (mStartTimeUs >= 0ll) {
         mSeqNumber = -1;
         mStartup = true;
         mPrepared = false;
-    }
-
-    if (startSeqNumberHint >= 0) {
-        mSeqNumber = startSeqNumberHint;
+        mAdaptive = adaptive;
     }
 
     postMonitorQueue();
@@ -506,11 +513,9 @@
 void PlaylistFetcher::onStop(const sp<AMessage> &msg) {
     cancelMonitorQueue();
 
-    int32_t selfTriggered;
-    CHECK(msg->findInt32("selfTriggered", &selfTriggered));
-    if (!selfTriggered) {
-        // Self triggered stops only happen during switching, in which case we do not want
-        // to clear the discontinuities queued at the end of packet sources.
+    int32_t clear;
+    CHECK(msg->findInt32("clear", &clear));
+    if (clear) {
         for (size_t i = 0; i < mPacketSources.size(); i++) {
             sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
             packetSource->clear();
@@ -552,15 +557,16 @@
         }
 
         // Don't resume if we would stop within a resume threshold.
+        int32_t discontinuitySeq;
         int64_t latestTimeUs = 0, stopTimeUs = 0;
-        sp<AMessage> latestMeta = packetSource->getLatestMeta();
+        sp<AMessage> latestMeta = packetSource->getLatestDequeuedMeta();
         if (latestMeta != NULL
-                && (latestMeta->findInt64("timeUs", &latestTimeUs)
-                && params->findInt64(stopKey, &stopTimeUs))) {
-            int64_t diffUs = stopTimeUs - latestTimeUs;
-            if (diffUs < resumeThreshold(latestMeta)) {
-                stop = true;
-            }
+                && latestMeta->findInt32("discontinuitySeq", &discontinuitySeq)
+                && discontinuitySeq == mDiscontinuitySeq
+                && latestMeta->findInt64("timeUs", &latestTimeUs)
+                && params->findInt64(stopKey, &stopTimeUs)
+                && stopTimeUs - latestTimeUs < resumeThreshold(latestMeta)) {
+            stop = true;
         }
     }
 
@@ -568,7 +574,7 @@
         for (size_t i = 0; i < mPacketSources.size(); i++) {
             mPacketSources.valueAt(i)->queueAccessUnit(mSession->createFormatChangeBuffer());
         }
-        stopAsync(/* selfTriggered = */ true);
+        stopAsync(/* clear = */ false);
         return OK;
     }
 
@@ -737,26 +743,47 @@
         mSeqNumber = lastSeqNumberInPlaylist;
     }
 
+    if (mDiscontinuitySeq < 0) {
+        mDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
+    }
+
     if (mSeqNumber < 0) {
         CHECK_GE(mStartTimeUs, 0ll);
 
-        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
-            mSeqNumber = getSeqNumberForTime(mStartTimeUs);
+        if (mSegmentStartTimeUs < 0) {
+            if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
+                // If this is a live session, start 3 segments from the end on connect
+                mSeqNumber = lastSeqNumberInPlaylist - 3;
+            } else {
+                mSeqNumber = getSeqNumberForTime(mStartTimeUs);
+                mStartTimeUs -= getSegmentStartTimeUs(mSeqNumber);
+            }
+            mStartTimeUsRelative = true;
             ALOGV("Initial sequence number for time %" PRId64 " is %d from (%d .. %d)",
                     mStartTimeUs, mSeqNumber, firstSeqNumberInPlaylist,
                     lastSeqNumberInPlaylist);
         } else {
-            // If this is a live session, start 3 segments from the end.
-            mSeqNumber = lastSeqNumberInPlaylist - 3;
+            mSeqNumber = getSeqNumberForTime(mSegmentStartTimeUs);
+            if (mAdaptive) {
+                // avoid double fetch/decode
+                mSeqNumber += 1;
+            }
+            ssize_t minSeq = getSeqNumberForDiscontinuity(mDiscontinuitySeq);
+            if (mSeqNumber < minSeq) {
+                mSeqNumber = minSeq;
+            }
+
             if (mSeqNumber < firstSeqNumberInPlaylist) {
                 mSeqNumber = firstSeqNumberInPlaylist;
             }
+
+            if (mSeqNumber > lastSeqNumberInPlaylist) {
+                mSeqNumber = lastSeqNumberInPlaylist;
+            }
             ALOGV("Initial sequence number for live event %d from (%d .. %d)",
                     mSeqNumber, firstSeqNumberInPlaylist,
                     lastSeqNumberInPlaylist);
         }
-
-        mStartTimeUs = -1ll;
     }
 
     if (mSeqNumber < firstSeqNumberInPlaylist
@@ -819,6 +846,7 @@
 
     int32_t val;
     if (itemMeta->findInt32("discontinuity", &val) && val != 0) {
+        mDiscontinuitySeq++;
         discontinuity = true;
     }
 
@@ -850,6 +878,7 @@
     }
 
     // block-wise download
+    bool startup = mStartup;
     ssize_t bytesRead;
     do {
         bytesRead = mSession->fetchFile(
@@ -879,7 +908,7 @@
             return;
         }
 
-        if (mStartup || discontinuity) {
+        if (startup || discontinuity) {
             // Signal discontinuity.
 
             if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
@@ -898,6 +927,8 @@
 
                 discontinuity = false;
             }
+
+            startup = false;
         }
 
         err = OK;
@@ -917,24 +948,19 @@
         }
 
         if (err == -EAGAIN) {
-            // bad starting sequence number hint
+            // starting sequence number too low
             mTSParser.clear();
             postMonitorQueue();
             return;
-        }
-
-        if (err == ERROR_OUT_OF_RANGE) {
+        } else if (err == ERROR_OUT_OF_RANGE) {
             // reached stopping point
-            stopAsync(/* selfTriggered = */ true);
+            stopAsync(/* clear = */ false);
             return;
-        }
-
-        if (err != OK) {
+        } else if (err != OK) {
             notifyError(err);
             return;
         }
 
-        mStartup = false;
     } while (bytesRead != 0);
 
     if (bufferStartsWithTsSyncByte(buffer)) {
@@ -994,11 +1020,44 @@
         return;
     }
 
+    mStartup = false;
     ++mSeqNumber;
 
     postMonitorQueue();
 }
 
+int32_t PlaylistFetcher::getSeqNumberForDiscontinuity(size_t discontinuitySeq) const {
+    int32_t firstSeqNumberInPlaylist;
+    if (mPlaylist->meta() == NULL
+            || !mPlaylist->meta()->findInt32("media-sequence", &firstSeqNumberInPlaylist)) {
+        firstSeqNumberInPlaylist = 0;
+    }
+
+    size_t curDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
+    if (discontinuitySeq < curDiscontinuitySeq) {
+        return firstSeqNumberInPlaylist <= 0 ? 0 : (firstSeqNumberInPlaylist - 1);
+    }
+
+    size_t index = 0;
+    while (index < mPlaylist->size()) {
+        sp<AMessage> itemMeta;
+        CHECK(mPlaylist->itemAt(index, NULL /* uri */, &itemMeta));
+
+        int64_t discontinuity;
+        if (itemMeta->findInt64("discontinuity", &discontinuity)) {
+            curDiscontinuitySeq++;
+        }
+
+        if (curDiscontinuitySeq == discontinuitySeq) {
+            return firstSeqNumberInPlaylist + index;
+        }
+
+        ++index;
+    }
+
+    return firstSeqNumberInPlaylist + mPlaylist->size();
+}
+
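
A standalone sketch of the lookup getSeqNumberForDiscontinuity() performs above: starting from the playlist's own discontinuity sequence, it counts EXT-X-DISCONTINUITY items until the requested sequence is reached and clamps to the window boundaries otherwise. The SegmentItem type and function name below are hypothetical stand-ins for the M3U parser's playlist items, not framework code.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct SegmentItem {
        bool discontinuity;   // true if the item carries an EXT-X-DISCONTINUITY tag
    };

    int32_t seqNumberForDiscontinuity(const std::vector<SegmentItem> &items,
                                      int32_t firstSeqNumberInPlaylist,
                                      size_t playlistDiscontinuitySeq,
                                      size_t targetDiscontinuitySeq) {
        // Target lies before this playlist window: clamp to just before it.
        if (targetDiscontinuitySeq < playlistDiscontinuitySeq) {
            return firstSeqNumberInPlaylist <= 0 ? 0 : firstSeqNumberInPlaylist - 1;
        }
        size_t cur = playlistDiscontinuitySeq;
        for (size_t index = 0; index < items.size(); ++index) {
            if (items[index].discontinuity) {
                ++cur;
            }
            if (cur == targetDiscontinuitySeq) {
                // First media sequence number inside the requested epoch.
                return firstSeqNumberInPlaylist + static_cast<int32_t>(index);
            }
        }
        // Target is beyond the end of the current window.
        return firstSeqNumberInPlaylist + static_cast<int32_t>(items.size());
    }
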
 int32_t PlaylistFetcher::getSeqNumberForTime(int64_t timeUs) const {
     int32_t firstSeqNumberInPlaylist;
     if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
@@ -1031,6 +1090,23 @@
     return firstSeqNumberInPlaylist + index;
 }
 
+const sp<ABuffer> &PlaylistFetcher::setAccessUnitProperties(
+        const sp<ABuffer> &accessUnit, const sp<AnotherPacketSource> &source, bool discard) {
+    sp<MetaData> format = source->getFormat();
+    if (format != NULL) {
+        // for simplicity, store a reference to the format in each unit
+        accessUnit->meta()->setObject("format", format);
+    }
+
+    if (discard) {
+        accessUnit->meta()->setInt32("discard", discard);
+    }
+
+    accessUnit->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
+    accessUnit->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
+    return accessUnit;
+}
+
 status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer) {
     if (mTSParser == NULL) {
         // Use TS_TIMESTAMPS_ARE_ABSOLUTE so pts carry over between fetchers.
@@ -1046,7 +1122,9 @@
         mTSParser->signalDiscontinuity(
                 ATSParser::DISCONTINUITY_SEEK, extra);
 
+        mAbsoluteTimeAnchorUs = mNextPTSTimeUs;
         mNextPTSTimeUs = -1ll;
+        mFirstPTSValid = false;
     }
 
     size_t offset = 0;
@@ -1099,46 +1177,30 @@
             continue;
         }
 
-        if (stream == LiveSession::STREAMTYPE_VIDEO && mVideoMime.empty()) {
-            const char *mime;
-            if (source->getFormat()->findCString(kKeyMIMEType, &mime)) {
-                mVideoMime.setTo(mime);
-                if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
-                    mSkipToFirstIDRAfterConnect = true;
-                }
-            }
-        }
-
         int64_t timeUs;
         sp<ABuffer> accessUnit;
         status_t finalResult;
         while (source->hasBufferAvailable(&finalResult)
                 && source->dequeueAccessUnit(&accessUnit) == OK) {
 
-            if (stream == LiveSession::STREAMTYPE_VIDEO && mSkipToFirstIDRAfterConnect) {
-                if (!IsIDR(accessUnit)) {
-                    continue;
-                } else {
-                    mSkipToFirstIDRAfterConnect = false;
-                }
-            }
-
             CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-            if (mMinStartTimeUs > 0) {
-                if (timeUs < mMinStartTimeUs) {
-                    // TODO untested path
-                    // try a later ts
-                    int32_t targetDuration;
-                    mPlaylist->meta()->findInt32("target-duration", &targetDuration);
-                    int32_t incr = (mMinStartTimeUs - timeUs) / 1000000 / targetDuration;
-                    if (incr == 0) {
-                        // increment mSeqNumber by at least one
-                        incr = 1;
+
+            if (mStartup) {
+                if (!mFirstPTSValid) {
+                    mFirstTimeUs = timeUs;
+                    mFirstPTSValid = true;
+                }
+                if (mStartTimeUsRelative) {
+                    timeUs -= mFirstTimeUs;
+                    if (timeUs < 0) {
+                        timeUs = 0;
                     }
-                    mSeqNumber += incr;
-                    err = -EAGAIN;
-                    break;
-                } else {
+                } else if (mAdaptive && timeUs > mStartTimeUs) {
+                    int32_t seq;
+                    if (mStartTimeUsNotify != NULL
+                            && !mStartTimeUsNotify->findInt32("discontinuitySeq", &seq)) {
+                        mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq);
+                    }
                     int64_t startTimeUs;
                     if (mStartTimeUsNotify != NULL
                             && !mStartTimeUsNotify->findInt64(key, &startTimeUs)) {
@@ -1155,12 +1217,51 @@
                         }
                     }
                 }
+
+                if (timeUs < mStartTimeUs) {
+                    if (mAdaptive) {
+                        int32_t targetDuration;
+                        mPlaylist->meta()->findInt32("target-duration", &targetDuration);
+                        int32_t incr = (mStartTimeUs - timeUs) / 1000000 / targetDuration;
+                        if (incr == 0) {
+                            // increment mSeqNumber by at least one
+                            incr = 1;
+                        }
+                        mSeqNumber += incr;
+                        err = -EAGAIN;
+                        break;
+                    } else {
+                        // buffer up to the closest preceding IDR frame
+                        ALOGV("timeUs %" PRId64 " us < mStartTimeUs %" PRId64 " us",
+                                timeUs, mStartTimeUs);
+                        const char *mime;
+                        sp<MetaData> format = source->getFormat();
+                        bool isAvc = false;
+                        if (format != NULL && format->findCString(kKeyMIMEType, &mime)
+                                && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+                            isAvc = true;
+                        }
+                        if (isAvc && IsIDR(accessUnit)) {
+                            mVideoBuffer->clear();
+                        }
+                        if (isAvc) {
+                            mVideoBuffer->queueAccessUnit(accessUnit);
+                        }
+
+                        continue;
+                    }
+                }
             }
 
             if (mStopParams != NULL) {
                 // Queue discontinuity in original stream.
+                int32_t discontinuitySeq;
                 int64_t stopTimeUs;
-                if (!mStopParams->findInt64(key, &stopTimeUs) || timeUs >= stopTimeUs) {
+                if (!mStopParams->findInt32("discontinuitySeq", &discontinuitySeq)
+                        || discontinuitySeq > mDiscontinuitySeq
+                        || !mStopParams->findInt64(key, &stopTimeUs)
+                        || (discontinuitySeq == mDiscontinuitySeq
+                                && timeUs >= stopTimeUs)) {
                     packetSource->queueAccessUnit(mSession->createFormatChangeBuffer());
                     mStreamTypeMask &= ~stream;
                     mPacketSources.removeItemsAt(i);
@@ -1169,15 +1270,18 @@
             }
 
             // Note that we do NOT dequeue any discontinuities except for format change.
-
-            // for simplicity, store a reference to the format in each unit
-            sp<MetaData> format = source->getFormat();
-            if (format != NULL) {
-                accessUnit->meta()->setObject("format", format);
+            if (stream == LiveSession::STREAMTYPE_VIDEO) {
+                const bool discard = true;
+                status_t status;
+                while (mVideoBuffer->hasBufferAvailable(&status)) {
+                    sp<ABuffer> videoBuffer;
+                    mVideoBuffer->dequeueAccessUnit(&videoBuffer);
+                    setAccessUnitProperties(videoBuffer, source, discard);
+                    packetSource->queueAccessUnit(videoBuffer);
+                }
             }
 
-            // Stash the sequence number so we can hint future playlist where to start at.
-            accessUnit->meta()->setInt32("seq", mSeqNumber);
+            setAccessUnitProperties(accessUnit, source);
             packetSource->queueAccessUnit(accessUnit);
         }
 
@@ -1244,7 +1348,8 @@
         CHECK(itemMeta->findInt64("durationUs", &durationUs));
         buffer->meta()->setInt64("timeUs", getSegmentStartTimeUs(mSeqNumber));
         buffer->meta()->setInt64("durationUs", durationUs);
-        buffer->meta()->setInt32("seq", mSeqNumber);
+        buffer->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
+        buffer->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
 
         packetSource->queueAccessUnit(buffer);
         return OK;
@@ -1310,14 +1415,6 @@
         firstID3Tag = false;
     }
 
-    if (!mFirstPTSValid) {
-        mFirstPTSValid = true;
-        mFirstPTS = PTS;
-    }
-    PTS -= mFirstPTS;
-
-    int64_t timeUs = (PTS * 100ll) / 9ll + mAbsoluteTimeAnchorUs;
-
     if (mStreamTypeMask != LiveSession::STREAMTYPE_AUDIO) {
         ALOGW("This stream only contains audio data!");
 
@@ -1360,6 +1457,12 @@
     int32_t sampleRate;
     CHECK(packetSource->getFormat()->findInt32(kKeySampleRate, &sampleRate));
 
+    int64_t timeUs = (PTS * 100ll) / 9ll;
+    if (!mFirstPTSValid) {
+        mFirstPTSValid = true;
+        mFirstTimeUs = timeUs;
+    }
+
     size_t offset = 0;
     while (offset < buffer->size()) {
         const uint8_t *adtsHeader = buffer->data() + offset;
@@ -1384,19 +1487,32 @@
 
         CHECK_LE(offset + aac_frame_length, buffer->size());
 
-        sp<ABuffer> unit = new ABuffer(aac_frame_length);
-        memcpy(unit->data(), adtsHeader, aac_frame_length);
-
         int64_t unitTimeUs = timeUs + numSamples * 1000000ll / sampleRate;
-        unit->meta()->setInt64("timeUs", unitTimeUs);
+        offset += aac_frame_length;
 
         // Each AAC frame encodes 1024 samples.
         numSamples += 1024;
 
-        unit->meta()->setInt32("seq", mSeqNumber);
-        packetSource->queueAccessUnit(unit);
+        if (mStartup) {
+            int64_t startTimeUs = unitTimeUs;
+            if (mStartTimeUsRelative) {
+                startTimeUs -= mFirstTimeUs;
+                if (startTimeUs < 0) {
+                    startTimeUs = 0;
+                }
+            }
+            if (startTimeUs < mStartTimeUs) {
+                continue;
+            }
+        }
 
-        offset += aac_frame_length;
+        sp<ABuffer> unit = new ABuffer(aac_frame_length);
+        memcpy(unit->data(), adtsHeader, aac_frame_length);
+
+        unit->meta()->setInt64("timeUs", unitTimeUs);
+        unit->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
+        unit->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
+        packetSource->queueAccessUnit(unit);
     }
 
     return OK;
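
The audio-only (ADTS) path above anchors its timestamps on the ID3 PTS, converting the 90 kHz clock to microseconds with PTS * 100 / 9, and then spaces successive AAC frames 1024 samples apart at the stream's sample rate. A standalone sketch of that arithmetic; the PTS value and sample rate are made up for the example rather than parsed from real headers.

    #include <cstdint>
    #include <cstdio>

    // Convert a 90 kHz PTS tick count to microseconds (PTS * 100 / 9).
    static int64_t ptsToUs(uint64_t pts90kHz) {
        return (int64_t)((pts90kHz * 100ull) / 9ull);
    }

    int main() {
        const uint64_t pts = 900000;       // 10 s worth of 90 kHz ticks
        const int32_t sampleRate = 44100;  // would come from the ADTS header
        int64_t baseUs = ptsToUs(pts);     // 10,000,000 us
        int64_t numSamples = 0;
        for (int frame = 0; frame < 3; ++frame) {
            // Each AAC frame carries 1024 PCM samples, so frame i starts
            // 1024 * i / sampleRate seconds after the anchor PTS.
            int64_t unitTimeUs = baseUs + numSamples * 1000000ll / sampleRate;
            std::printf("frame %d starts at %lld us\n", frame, (long long)unitTimeUs);
            numSamples += 1024;
        }
        return 0;
    }
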
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index e4fdbff..daefb26 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -57,13 +57,15 @@
             const sp<AnotherPacketSource> &audioSource,
             const sp<AnotherPacketSource> &videoSource,
             const sp<AnotherPacketSource> &subtitleSource,
-            int64_t startTimeUs = -1ll,
-            int64_t minStartTimeUs = 0ll /* start after this timestamp */,
-            int32_t startSeqNumberHint = -1 /* try starting at this sequence number */);
+            int64_t startTimeUs = -1ll,         // starting timestamp
+            int64_t segmentStartTimeUs = -1ll,  // starting position within playlist
+            // startTimeUs != segmentStartTimeUs only when the playlist is live
+            int32_t startDiscontinuitySeq = 0,
+            bool adaptive = false);
 
     void pauseAsync();
 
-    void stopAsync(bool selfTriggered = false);
+    void stopAsync(bool clear = true);
 
     void resumeUntilAsync(const sp<AMessage> &params);
 
@@ -99,11 +101,12 @@
 
     sp<LiveSession> mSession;
     AString mURI;
-    AString mVideoMime;
 
     uint32_t mStreamTypeMask;
     int64_t mStartTimeUs;
-    int64_t mMinStartTimeUs; // start fetching no earlier than this value
+    int64_t mSegmentStartTimeUs;
+    ssize_t mDiscontinuitySeq;
+    bool mStartTimeUsRelative;
     sp<AMessage> mStopParams; // message containing the latest timestamps we should fetch.
 
     KeyedVector<LiveSession::StreamType, sp<AnotherPacketSource> >
@@ -116,8 +119,8 @@
     int32_t mSeqNumber;
     int32_t mNumRetries;
     bool mStartup;
+    bool mAdaptive;
     bool mPrepared;
-    bool mSkipToFirstIDRAfterConnect;
     int64_t mNextPTSTimeUs;
 
     int32_t mMonitorQueueGeneration;
@@ -136,7 +139,9 @@
 
     bool mFirstPTSValid;
     uint64_t mFirstPTS;
+    int64_t mFirstTimeUs;
     int64_t mAbsoluteTimeAnchorUs;
+    sp<AnotherPacketSource> mVideoBuffer;
 
     // Stores the initialization vector to decrypt the next block of cipher text, which can
     // either be derived from the sequence number, read from the manifest, or copied from
@@ -175,6 +180,10 @@
     // Resume a fetcher to continue until the stopping point stored in msg.
     status_t onResumeUntil(const sp<AMessage> &msg);
 
+    const sp<ABuffer> &setAccessUnitProperties(
+            const sp<ABuffer> &accessUnit,
+            const sp<AnotherPacketSource> &source,
+            bool discard = false);
     status_t extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer);
 
     status_t extractAndQueueAccessUnits(
@@ -185,6 +194,8 @@
     void queueDiscontinuity(
             ATSParser::DiscontinuityType type, const sp<AMessage> &extra);
 
+    int32_t getSeqNumberWithAnchorTime(int64_t anchorTimeUs) const;
+    int32_t getSeqNumberForDiscontinuity(size_t discontinuitySeq) const;
     int32_t getSeqNumberForTime(int64_t timeUs) const;
 
     void updateDuration();
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
index d050fa6..7f200dd 100644
--- a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
@@ -27,8 +27,6 @@
 #include <utils/threads.h>
 #include <utils/Vector.h>
 
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))
-
 namespace android {
 
 struct SoftVideoDecoderOMXComponent : public SimpleSoftOMXComponent {
diff --git a/media/libstagefright/include/WVMExtractor.h b/media/libstagefright/include/WVMExtractor.h
index 8e62946..ab7e8b8 100644
--- a/media/libstagefright/include/WVMExtractor.h
+++ b/media/libstagefright/include/WVMExtractor.h
@@ -49,6 +49,7 @@
     virtual sp<MediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
     virtual sp<MetaData> getMetaData();
+    virtual void setUID(uid_t uid);
 
     // Return the amount of data cached from the current
     // playback position (in us).
@@ -74,8 +75,6 @@
     // codec.
     void setCryptoPluginMode(bool cryptoPluginMode);
 
-    void setUID(uid_t uid);
-
     static bool getVendorLibHandle();
 
     status_t getError();
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index eda6387..6d8866a 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -894,6 +894,12 @@
                 ALOGV("Stream PID 0x%08x of type 0x%02x now has data.",
                      mElementaryPID, mStreamType);
 
+                const char *mime;
+                if (meta->findCString(kKeyMIMEType, &mime)
+                        && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
+                        && !IsIDR(accessUnit)) {
+                    continue;
+                }
                 mSource = new AnotherPacketSource(meta);
                 mSource->queueAccessUnit(accessUnit);
             }
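
The ATSParser change above drops leading AVC access units until an IDR frame arrives, so a newly created source never starts on an undecodable P/B frame; IsIDR() is the existing avc_utils helper. A simplified standalone version of that check for an Annex-B byte stream is sketched below. It only scans 3-byte start codes and ignores emulation prevention, so it is an illustration of the idea rather than the framework implementation.

    #include <cstddef>
    #include <cstdint>

    // Returns true if the Annex-B access unit contains an IDR slice
    // (NAL unit type 5).
    static bool containsIdrNal(const uint8_t *data, size_t size) {
        for (size_t i = 0; i + 3 < size; ++i) {
            if (data[i] == 0x00 && data[i + 1] == 0x00 && data[i + 2] == 0x01) {
                uint8_t nalType = data[i + 3] & 0x1f;
                if (nalType == 5) {   // coded slice of an IDR picture
                    return true;
                }
                i += 3;  // skip past the start code we just matched
            }
        }
        return false;
    }
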
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 72c9dae..010063f 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -14,6 +14,9 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AnotherPacketSource"
+
 #include "AnotherPacketSource.h"
 
 #include <media/stagefright/foundation/ABuffer.h>
@@ -38,7 +41,8 @@
       mFormat(NULL),
       mLastQueuedTimeUs(0),
       mEOSResult(OK),
-      mLatestEnqueuedMeta(NULL) {
+      mLatestEnqueuedMeta(NULL),
+      mLatestDequeuedMeta(NULL) {
     setFormat(meta);
 }
 
@@ -92,7 +96,7 @@
 
         sp<RefBase> object;
         if (buffer->meta()->findObject("format", &object)) {
-            return static_cast<MetaData*>(object.get());
+            return mFormat = static_cast<MetaData*>(object.get());
         }
 
         ++it;
@@ -121,6 +125,8 @@
             return INFO_DISCONTINUITY;
         }
 
+        mLatestDequeuedMeta = (*buffer)->meta()->dup();
+
         sp<RefBase> object;
         if ((*buffer)->meta()->findObject("format", &object)) {
             mFormat = static_cast<MetaData*>(object.get());
@@ -142,8 +148,10 @@
     }
 
     if (!mBuffers.empty()) {
+
         const sp<ABuffer> buffer = *mBuffers.begin();
         mBuffers.erase(mBuffers.begin());
+        mLatestDequeuedMeta = buffer->meta()->dup();
 
         int32_t discontinuity;
         if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
@@ -202,7 +210,7 @@
     mBuffers.push_back(buffer);
     mCondition.signal();
 
-    if (!mLatestEnqueuedMeta.get()) {
+    if (mLatestEnqueuedMeta == NULL) {
         mLatestEnqueuedMeta = buffer->meta();
     } else {
         int64_t latestTimeUs = 0;
@@ -341,9 +349,14 @@
     return (mEOSResult != OK);
 }
 
-sp<AMessage> AnotherPacketSource::getLatestMeta() {
+sp<AMessage> AnotherPacketSource::getLatestEnqueuedMeta() {
     Mutex::Autolock autoLock(mLock);
     return mLatestEnqueuedMeta;
 }
 
+sp<AMessage> AnotherPacketSource::getLatestDequeuedMeta() {
+    Mutex::Autolock autoLock(mLock);
+    return mLatestDequeuedMeta;
+}
+
 }  // namespace android
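
AnotherPacketSource now remembers metadata on both sides of the queue: mLatestEnqueuedMeta at push time and mLatestDequeuedMeta (a dup of the buffer's meta) at pop time, so callers can see both the newest queued unit and the last unit actually handed out. A standalone sketch of that bookkeeping with standard containers; the Meta struct is a hypothetical stand-in for the per-buffer AMessage.

    #include <cstdint>
    #include <deque>
    #include <memory>

    struct Meta {                 // stand-in for the per-buffer AMessage meta
        int64_t timeUs;
        int32_t discontinuitySeq;
    };

    class PacketQueue {
    public:
        void enqueue(const Meta &meta) {
            mBuffers.push_back(meta);
            mLatestEnqueued = std::make_shared<Meta>(meta);  // newest queued unit
        }

        bool dequeue(Meta *out) {
            if (mBuffers.empty()) {
                return false;
            }
            *out = mBuffers.front();
            mBuffers.pop_front();
            mLatestDequeued = std::make_shared<Meta>(*out);  // last unit handed out
            return true;
        }

        std::shared_ptr<Meta> latestEnqueuedMeta() const { return mLatestEnqueued; }
        std::shared_ptr<Meta> latestDequeuedMeta() const { return mLatestDequeued; }

    private:
        std::deque<Meta> mBuffers;
        std::shared_ptr<Meta> mLatestEnqueued;
        std::shared_ptr<Meta> mLatestDequeued;
    };
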
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index f38f9dc..0c717d7 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -64,7 +64,8 @@
 
     bool isFinished(int64_t duration) const;
 
-    sp<AMessage> getLatestMeta();
+    sp<AMessage> getLatestEnqueuedMeta();
+    sp<AMessage> getLatestDequeuedMeta();
 
 protected:
     virtual ~AnotherPacketSource();
@@ -80,6 +81,7 @@
     List<sp<ABuffer> > mBuffers;
     status_t mEOSResult;
     sp<AMessage> mLatestEnqueuedMeta;
+    sp<AMessage> mLatestDequeuedMeta;
 
     bool wasFormatChange(int32_t discontinuityType) const;
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index bd7121e..1f77b2f 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1159,6 +1159,9 @@
 void AudioFlinger::registerClient(const sp<IAudioFlingerClient>& client)
 {
     Mutex::Autolock _l(mLock);
+    if (client == 0) {
+        return;
+    }
     bool clientAdded = false;
     {
         Mutex::Autolock _cl(mClientLock);
@@ -1453,6 +1456,9 @@
 
 audio_module_handle_t AudioFlinger::loadHwModule(const char *name)
 {
+    if (name == NULL) {
+        return 0;
+    }
     if (!settingsAllowed()) {
         return 0;
     }
@@ -1573,6 +1579,25 @@
     return NO_ERROR;
 }
 
+audio_hw_sync_t AudioFlinger::getAudioHwSyncForSession(audio_session_t sessionId)
+{
+    Mutex::Autolock _l(mLock);
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+        sp<PlaybackThread> thread = mPlaybackThreads.valueAt(i);
+        if ((thread->hasAudioSession(sessionId) & ThreadBase::TRACK_SESSION) != 0) {
+            // A session can only be on one thread, so exit after first match
+            String8 reply = thread->getParameters(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC));
+            AudioParameter param = AudioParameter(reply);
+            int value;
+            if (param.getInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), value) == NO_ERROR) {
+                return value;
+            }
+            break;
+        }
+    }
+    return AUDIO_HW_SYNC_INVALID;
+}
+
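
getAudioHwSyncForSession() above finds the playback thread that owns the session, asks it for AUDIO_PARAMETER_STREAM_HW_AV_SYNC via getParameters(), and pulls the integer out of the key=value reply with AudioParameter::getInt(). A standalone sketch of just that reply-parsing step with the standard library; the exact key string ("hw_av_sync") and the helper below are assumptions for illustration, not the AudioParameter API.

    #include <cstdlib>
    #include <string>

    // Extracts an integer value for `key` from a "key1=val1;key2=val2" style
    // reply string; returns `fallback` if the key is missing or malformed.
    static int getIntParam(const std::string &reply, const std::string &key,
                           int fallback) {
        size_t pos = reply.find(key + "=");
        if (pos == std::string::npos) {
            return fallback;
        }
        const char *start = reply.c_str() + pos + key.size() + 1;
        char *end = nullptr;
        long value = std::strtol(start, &end, 10);
        return (end == start) ? fallback : static_cast<int>(value);
    }

    // Example: getIntParam("hw_av_sync=12", "hw_av_sync", -1) yields 12,
    // while a reply without the key yields the fallback (AUDIO_HW_SYNC_INVALID
    // in the code above).
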
 // ----------------------------------------------------------------------------
 
 
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 31c5a1a..753314f 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -248,6 +248,9 @@
     /* Set audio port configuration */
     virtual status_t setAudioPortConfig(const struct audio_port_config *config);
 
+    /* Get the HW synchronization source used for an audio session */
+    virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId);
+
     virtual     status_t    onTransact(
                                 uint32_t code,
                                 const Parcel& data,
@@ -340,7 +343,8 @@
             uint32_t channelCount = FCC_2; // stereo is default
             if (kEnableExtendedChannels) {
                 channelCount = audio_channel_count_from_out_mask(channelMask);
-                if (channelCount > AudioMixer::MAX_NUM_CHANNELS) {
+                if (channelCount < FCC_2 // mono is not supported at this time
+                        || channelCount > AudioMixer::MAX_NUM_CHANNELS) {
                     return false;
                 }
             }
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 6edca1b..7ac2c0c 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -1797,109 +1797,6 @@
     }
 }
 
-#if 0
-// 2 tracks is also a common case
-// NEVER used in current implementation of process__validate()
-// only use if the 2 tracks have the same output buffer
-void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state,
-                                                            int64_t pts)
-{
-    int i;
-    uint32_t en = state->enabledTracks;
-
-    i = 31 - __builtin_clz(en);
-    const track_t& t0 = state->tracks[i];
-    AudioBufferProvider::Buffer& b0(t0.buffer);
-
-    en &= ~(1<<i);
-    i = 31 - __builtin_clz(en);
-    const track_t& t1 = state->tracks[i];
-    AudioBufferProvider::Buffer& b1(t1.buffer);
-
-    const int16_t *in0;
-    const int16_t vl0 = t0.volume[0];
-    const int16_t vr0 = t0.volume[1];
-    size_t frameCount0 = 0;
-
-    const int16_t *in1;
-    const int16_t vl1 = t1.volume[0];
-    const int16_t vr1 = t1.volume[1];
-    size_t frameCount1 = 0;
-
-    //FIXME: only works if two tracks use same buffer
-    int32_t* out = t0.mainBuffer;
-    size_t numFrames = state->frameCount;
-    const int16_t *buff = NULL;
-
-
-    while (numFrames) {
-
-        if (frameCount0 == 0) {
-            b0.frameCount = numFrames;
-            int64_t outputPTS = calculateOutputPTS(t0, pts,
-                                                   out - t0.mainBuffer);
-            t0.bufferProvider->getNextBuffer(&b0, outputPTS);
-            if (b0.i16 == NULL) {
-                if (buff == NULL) {
-                    buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
-                }
-                in0 = buff;
-                b0.frameCount = numFrames;
-            } else {
-                in0 = b0.i16;
-            }
-            frameCount0 = b0.frameCount;
-        }
-        if (frameCount1 == 0) {
-            b1.frameCount = numFrames;
-            int64_t outputPTS = calculateOutputPTS(t1, pts,
-                                                   out - t0.mainBuffer);
-            t1.bufferProvider->getNextBuffer(&b1, outputPTS);
-            if (b1.i16 == NULL) {
-                if (buff == NULL) {
-                    buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
-                }
-                in1 = buff;
-                b1.frameCount = numFrames;
-            } else {
-                in1 = b1.i16;
-            }
-            frameCount1 = b1.frameCount;
-        }
-
-        size_t outFrames = frameCount0 < frameCount1?frameCount0:frameCount1;
-
-        numFrames -= outFrames;
-        frameCount0 -= outFrames;
-        frameCount1 -= outFrames;
-
-        do {
-            int32_t l0 = *in0++;
-            int32_t r0 = *in0++;
-            l0 = mul(l0, vl0);
-            r0 = mul(r0, vr0);
-            int32_t l = *in1++;
-            int32_t r = *in1++;
-            l = mulAdd(l, vl1, l0) >> 12;
-            r = mulAdd(r, vr1, r0) >> 12;
-            // clamping...
-            l = clamp16(l);
-            r = clamp16(r);
-            *out++ = (r<<16) | (l & 0xFFFF);
-        } while (--outFrames);
-
-        if (frameCount0 == 0) {
-            t0.bufferProvider->releaseBuffer(&b0);
-        }
-        if (frameCount1 == 0) {
-            t1.bufferProvider->releaseBuffer(&b1);
-        }
-    }
-
-    delete [] buff;
-}
-#endif
-
 int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
                                        int outputFrameIndex)
 {
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 5ba377b..3b972bb 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -408,10 +408,6 @@
     static void process__genericResampling(state_t* state, int64_t pts);
     static void process__OneTrack16BitsStereoNoResampling(state_t* state,
                                                           int64_t pts);
-#if 0
-    static void process__TwoTracks16BitsStereoNoResampling(state_t* state,
-                                                           int64_t pts);
-#endif
 
     static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS,
                                       int outputFrameIndex);
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 77aca00..ec3d731 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1387,7 +1387,12 @@
 // Must be called with EffectChain::mLock locked
 void AudioFlinger::EffectChain::clearInputBuffer_l(sp<ThreadBase> thread)
 {
-    memset(mInBuffer, 0, thread->frameCount() * thread->frameSize());
+    // TODO: This will change in the future, depending on multichannel
+    // and sample format changes for effects.
+    // Currently effects processing is only available for stereo, AUDIO_FORMAT_PCM_16_BIT
+    // (4 bytes frame size)
+    const size_t frameSize = audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT) * FCC_2;
+    memset(mInBuffer, 0, thread->frameCount() * frameSize);
 }
 
 // Must be called with EffectChain::mLock locked
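
Per the TODO above, clearInputBuffer_l() now clears frameCount frames of the fixed effect format (stereo, AUDIO_FORMAT_PCM_16_BIT, 4 bytes per frame) instead of using the thread's own frameSize(), since the thread's sink format can differ from the format the effect chain actually processes. A minimal sketch of the size arithmetic with hypothetical buffer and frame-count values:

    #include <cstdint>
    #include <cstring>

    int main() {
        const size_t kBytesPerSample = sizeof(int16_t);         // PCM 16-bit
        const size_t kChannels = 2;                             // stereo (FCC_2)
        const size_t kFrameSize = kBytesPerSample * kChannels;  // 4 bytes per frame

        const size_t frameCount = 1024;      // hypothetical thread frame count
        int16_t buffer[1024 * 2] = {};       // stand-in for the effect input buffer

        // Clear exactly frameCount stereo 16-bit frames, i.e. 4096 bytes here,
        // regardless of what the thread's own frame size happens to be.
        std::memset(buffer, 0, frameCount * kFrameSize);
        return 0;
    }
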
diff --git a/services/audioflinger/StateQueue.cpp b/services/audioflinger/StateQueue.cpp
index 7e01c9f..40d7bcd 100644
--- a/services/audioflinger/StateQueue.cpp
+++ b/services/audioflinger/StateQueue.cpp
@@ -41,13 +41,14 @@
 // Constructor and destructor
 
 template<typename T> StateQueue<T>::StateQueue() :
-    mNext(NULL), mAck(NULL), mCurrent(NULL),
+    mAck(NULL), mCurrent(NULL),
     mMutating(&mStates[0]), mExpecting(NULL),
     mInMutation(false), mIsDirty(false), mIsInitialized(false)
 #ifdef STATE_QUEUE_DUMP
     , mObserverDump(&mObserverDummyDump), mMutatorDump(&mMutatorDummyDump)
 #endif
 {
+    atomic_init(&mNext, 0);
 }
 
 template<typename T> StateQueue<T>::~StateQueue()
@@ -58,11 +59,8 @@
 
 template<typename T> const T* StateQueue<T>::poll()
 {
-#ifdef __LP64__
-    const T *next = (const T *) android_atomic_acquire_load64((volatile int64_t *) &mNext);
-#else
-    const T *next = (const T *) android_atomic_acquire_load((volatile int32_t *) &mNext);
-#endif
+    const T *next = (const T *) atomic_load_explicit(&mNext, memory_order_acquire);
+
     if (next != mCurrent) {
         mAck = next;    // no additional barrier needed
         mCurrent = next;
@@ -144,11 +142,7 @@
         }
 
         // publish
-#ifdef __LP64__
-        android_atomic_release_store64((int64_t) mMutating, (volatile int64_t *) &mNext);
-#else
-        android_atomic_release_store((int32_t) mMutating, (volatile int32_t *) &mNext);
-#endif
+        atomic_store_explicit(&mNext, (uintptr_t)mMutating, memory_order_release);
         mExpecting = mMutating;
 
         // copy with circular wraparound
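
The StateQueue change replaces the LP64-conditional android_atomic_*64 calls with C11 <stdatomic.h> operations on an atomic_uintptr_t, keeping the same release-store on publish and acquire-load on poll. A C++ sketch of that single-writer publish / single-reader poll pattern with std::atomic; the class and member names here are illustrative, not the framework's.

    #include <atomic>

    template <typename T>
    class SingleWriterStateQueue {
    public:
        // Writer thread: publish a fully constructed state with release semantics
        // so every write to *state is visible before the pointer itself.
        void publish(const T *state) {
            mNext.store(state, std::memory_order_release);
        }

        // Reader thread: poll with acquire semantics; returns the newest state,
        // or nullptr if nothing has been published yet.
        const T *poll() {
            const T *next = mNext.load(std::memory_order_acquire);
            if (next != mCurrent) {
                mCurrent = next;   // reader-side cache, no extra barrier needed
            }
            return mCurrent;
        }

    private:
        std::atomic<const T *> mNext{nullptr};
        const T *mCurrent = nullptr;  // only touched by the reader thread
    };
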
diff --git a/services/audioflinger/StateQueue.h b/services/audioflinger/StateQueue.h
index 9e176c4..27f6a28 100644
--- a/services/audioflinger/StateQueue.h
+++ b/services/audioflinger/StateQueue.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_AUDIO_STATE_QUEUE_H
 #define ANDROID_AUDIO_STATE_QUEUE_H
 
+#include <stdatomic.h>
+
 // The state queue template class was originally driven by this use case / requirements:
 //  There are two threads: a fast mixer, and a normal mixer, and they share state.
 //  The interesting part of the shared state is a set of active fast tracks,
@@ -186,7 +188,7 @@
     T                 mStates[kN];      // written by mutator, read by observer
 
     // "volatile" is meaningless with SMP, but here it indicates that we're using atomic ops
-    volatile const T* mNext; // written by mutator to advance next, read by observer
+    atomic_uintptr_t  mNext; // written by mutator to advance next, read by observer
     volatile const T* mAck;  // written by observer to acknowledge advance of next, read by mutator
 
     // only used by observer
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 2e2f533..7d583bb5 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -910,11 +910,11 @@
         goto Exit;
     }
 
-    // Reject any effect on multichannel sinks.
+    // Reject any effect on mixer or duplicating multichannel sinks.
     // TODO: fix both format and multichannel issues with effects.
-    if (mChannelCount != FCC_2) {
-        ALOGW("createEffect_l() Cannot add effect %s for multichannel(%d) thread",
-                desc->name, mChannelCount);
+    if ((mType == MIXER || mType == DUPLICATING) && mChannelCount != FCC_2) {
+        ALOGW("createEffect_l() Cannot add effect %s for multichannel(%d) %s threads",
+                desc->name, mChannelCount, mType == MIXER ? "MIXER" : "DUPLICATING");
         lStatus = BAD_VALUE;
         goto Exit;
     }
@@ -2639,12 +2639,9 @@
 
     threadLoop_exit();
 
-    // for DuplicatingThread, standby mode is handled by the outputTracks, otherwise ...
-    if (mType == MIXER || mType == DIRECT || mType == OFFLOAD) {
-        // put output stream into standby mode
-        if (!mStandby) {
-            mOutput->stream->common.standby(&mOutput->stream->common);
-        }
+    if (!mStandby) {
+        threadLoop_standby();
+        mStandby = true;
     }
 
     releaseWakeLock();
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 48093da..c5ab832 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1885,10 +1885,10 @@
     buf.mFrameCount = buffer->frameCount;
     status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
     ALOGV_IF(status != NO_ERROR, "PatchTrack() %p getNextBuffer status %d", this, status);
+    buffer->frameCount = buf.mFrameCount;
     if (buf.mFrameCount == 0) {
         return WOULD_BLOCK;
     }
-    buffer->frameCount = buf.mFrameCount;
     status = Track::getNextBuffer(buffer, pts);
     return status;
 }
@@ -2166,10 +2166,10 @@
     status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
     ALOGV_IF(status != NO_ERROR,
              "PatchRecord() %p mPeerProxy->obtainBuffer status %d", this, status);
+    buffer->frameCount = buf.mFrameCount;
     if (buf.mFrameCount == 0) {
         return WOULD_BLOCK;
     }
-    buffer->frameCount = buf.mFrameCount;
     status = RecordTrack::getNextBuffer(buffer, pts);
     return status;
 }
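
The PatchTrack/PatchRecord fix above copies the obtained frame count back into the caller's buffer before the WOULD_BLOCK early return, so a zero result is reported as zero frames instead of leaving the caller's original requested count in place. A tiny standalone sketch of the corrected ordering; the types are hypothetical stand-ins for AudioBufferProvider::Buffer and the proxy's buffer.

    #include <cstddef>

    struct Buffer      { size_t frameCount; };   // caller's request/result
    struct ProxyBuffer { size_t mFrameCount; };  // what the peer proxy filled in

    enum Status { OK = 0, WOULD_BLOCK = 1 };

    // Always report how many frames were actually obtained, even when that
    // number is zero and we bail out with WOULD_BLOCK.
    Status getNextBuffer(Buffer *buffer, const ProxyBuffer &obtained) {
        buffer->frameCount = obtained.mFrameCount;
        if (obtained.mFrameCount == 0) {
            return WOULD_BLOCK;   // caller sees frameCount == 0, not its old request
        }
        return OK;
    }
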
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index f3be42d..6512c38 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -30,7 +30,7 @@
     libbinder \
     libmedia \
     libhardware \
-    libhardware_legacy \
+    libhardware_legacy
 
 ifneq ($(USE_LEGACY_AUDIO_POLICY), 1)
 LOCAL_SHARED_LIBRARIES += \
@@ -58,7 +58,8 @@
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
     libutils \
-    liblog
+    liblog \
+    libsoundtrigger
 
 LOCAL_STATIC_LIBRARIES := \
     libmedia_helper
diff --git a/services/audiopolicy/AudioPolicyClientImpl.cpp b/services/audiopolicy/AudioPolicyClientImpl.cpp
index c0019d1..3e090e9 100644
--- a/services/audiopolicy/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/AudioPolicyClientImpl.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "AudioPolicyClientImpl"
 //#define LOG_NDEBUG 0
 
+#include <soundtrigger/SoundTrigger.h>
 #include <utils/Log.h>
 #include "AudioPolicyService.h"
 
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 50ee803..5524463 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -190,6 +190,11 @@
     virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
     virtual void clearAudioPatches(uid_t uid) = 0;
 
+    virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+                                           audio_io_handle_t *ioHandle,
+                                           audio_devices_t *device) = 0;
+
+    virtual status_t releaseSoundTriggerSession(audio_session_t session) = 0;
 };
 
 
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
index 75745b3..2c51e25 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -531,4 +531,24 @@
     return mAudioPolicyManager->setAudioPortConfig(config);
 }
 
+status_t AudioPolicyService::acquireSoundTriggerSession(audio_session_t *session,
+                                       audio_io_handle_t *ioHandle,
+                                       audio_devices_t *device)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+
+    return mAudioPolicyManager->acquireSoundTriggerSession(session, ioHandle, device);
+}
+
+status_t AudioPolicyService::releaseSoundTriggerSession(audio_session_t session)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+
+    return mAudioPolicyManager->releaseSoundTriggerSession(session);
+}
+
 }; // namespace android
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
index aa46ace..f20c070 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
@@ -496,10 +496,21 @@
                                     audio_output_flags_t flags,
                                     const audio_offload_info_t *offloadInfo)
 {
-    //FIXME: temporary to fix build with USE_LEGACY_AUDIO_POLICY
-    audio_stream_type_t stream = AUDIO_STREAM_MUSIC;
+    audio_stream_type_t stream = audio_attributes_to_stream_type(attr);
+
     return getOutput(stream, samplingRate, format, channelMask, flags, offloadInfo);
 }
 
+status_t AudioPolicyService::acquireSoundTriggerSession(audio_session_t *session,
+                                       audio_io_handle_t *ioHandle,
+                                       audio_devices_t *device)
+{
+    return INVALID_OPERATION;
+}
+
+status_t AudioPolicyService::releaseSoundTriggerSession(audio_session_t session)
+{
+    return INVALID_OPERATION;
+}
 
 }; // namespace android
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 65d52d0..f95b839 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -43,6 +43,7 @@
 #include <hardware/audio.h>
 #include <hardware/audio_effect.h>
 #include <media/AudioParameter.h>
+#include <soundtrigger/SoundTrigger.h>
 #include "AudioPolicyManager.h"
 #include "audio_policy_conf.h"
 
@@ -87,14 +88,15 @@
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPDIF),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_FM),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_AMBIENT),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_HDMI),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
@@ -116,6 +118,7 @@
     STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
     STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
     STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
 };
 
 const StringToEnum sFormatNameToEnumTable[] = {
@@ -206,9 +209,10 @@
                                                           audio_policy_dev_state_t state,
                                                   const char *device_address)
 {
-    String8 address = String8(device_address);
+    String8 address = (device_address == NULL) ? String8("") : String8(device_address);
 
-    ALOGV("setDeviceConnectionState() device: %x, state %d, address %s", device, state, device_address);
+    ALOGV("setDeviceConnectionState() device: %x, state %d, address %s",
+            device, state, address.string());
 
     // connect/disconnect only 1 device at a time
     if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
@@ -237,10 +241,14 @@
             // register new device as available
             index = mAvailableOutputDevices.add(devDesc);
             if (index >= 0) {
-                mAvailableOutputDevices[index]->mId = nextUniqueId();
                 sp<HwModule> module = getModuleForDevice(device);
-                ALOG_ASSERT(module != NULL, "setDeviceConnectionState():"
-                        "could not find HW module for device %08x", device);
+                if (module == 0) {
+                    ALOGD("setDeviceConnectionState() could not find HW module for device %08x",
+                          device);
+                    mAvailableOutputDevices.remove(devDesc);
+                    return INVALID_OPERATION;
+                }
+                mAvailableOutputDevices[index]->mId = nextUniqueId();
                 mAvailableOutputDevices[index]->mModule = module;
             } else {
                 return NO_MEMORY;
@@ -296,17 +304,24 @@
         }
 
         updateDevicesAndOutputs();
+        if (mPhoneState == AUDIO_MODE_IN_CALL) {
+            audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
+            updateCallRouting(newDevice);
+        }
         for (size_t i = 0; i < mOutputs.size(); i++) {
-            // do not force device change on duplicated output because if device is 0, it will
-            // also force a device 0 for the two outputs it is duplicated to which may override
-            // a valid device selection on those outputs.
-            bool force = !mOutputs.valueAt(i)->isDuplicated()
-                    && (!deviceDistinguishesOnAddress(device)
-                            // always force when disconnecting (a non-duplicated device)
-                            || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
-            setOutputDevice(mOutputs.keyAt(i),
-                            getNewOutputDevice(mOutputs.keyAt(i), true /*fromCache*/),
-                            force, 0);
+            audio_io_handle_t output = mOutputs.keyAt(i);
+            if ((mPhoneState != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) {
+                audio_devices_t newDevice = getNewOutputDevice(mOutputs.keyAt(i),
+                                                               true /*fromCache*/);
+                // do not force device change on duplicated output because if device is 0, it will
+                // also force a device 0 for the two outputs it is duplicated to which may override
+                // a valid device selection on those outputs.
+                bool force = !mOutputs.valueAt(i)->isDuplicated()
+                        && (!deviceDistinguishesOnAddress(device)
+                                // always force when disconnecting (a non-duplicated device)
+                                || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
+                setOutputDevice(output, newDevice, force, 0);
+            }
         }
 
         mpClientInterface->onAudioPortListUpdate();
@@ -364,6 +379,11 @@
 
         closeAllInputs();
 
+        if (mPhoneState == AUDIO_MODE_IN_CALL) {
+            audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
+            updateCallRouting(newDevice);
+        }
+
         mpClientInterface->onAudioPortListUpdate();
         return NO_ERROR;
     } // end if is input device
@@ -376,9 +396,8 @@
                                                   const char *device_address)
 {
     audio_policy_dev_state_t state = AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
-    String8 address = String8(device_address);
     sp<DeviceDescriptor> devDesc = new DeviceDescriptor(String8(""), device);
-    devDesc->mAddress = String8(device_address);
+    devDesc->mAddress = (device_address == NULL) ? String8("") : String8(device_address);
     ssize_t index;
     DeviceVector *deviceVector;
 
@@ -399,10 +418,124 @@
     }
 }
 
+void AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, int delayMs)
+{
+    bool createTxPatch = false;
+    struct audio_patch patch;
+    patch.num_sources = 1;
+    patch.num_sinks = 1;
+    status_t status;
+    audio_patch_handle_t afPatchHandle;
+    DeviceVector deviceList;
+
+    audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+    ALOGV("updateCallRouting device rxDevice %08x txDevice %08x", rxDevice, txDevice);
+
+    // release existing RX patch if any
+    if (mCallRxPatch != 0) {
+        mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
+        mCallRxPatch.clear();
+    }
+    // release TX patch if any
+    if (mCallTxPatch != 0) {
+        mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
+        mCallTxPatch.clear();
+    }
+
+    // If the RX device is on the primary HW module, then use legacy routing method for voice calls
+    // via setOutputDevice() on primary output.
+    // Otherwise, create two audio patches for TX and RX path.
+    if (availablePrimaryOutputDevices() & rxDevice) {
+        setOutputDevice(mPrimaryOutput, rxDevice, true, delayMs);
+        // If the TX device is also on the primary HW module, setOutputDevice() will take care
+        // of it due to legacy implementation. If not, create a patch.
+        if ((availablePrimaryInputDevices() & txDevice & ~AUDIO_DEVICE_BIT_IN)
+                == AUDIO_DEVICE_NONE) {
+            createTxPatch = true;
+        }
+    } else {
+        // create RX path audio patch
+        deviceList = mAvailableOutputDevices.getDevicesFromType(rxDevice);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() selected device not in output device list");
+        sp<DeviceDescriptor> rxSinkDeviceDesc = deviceList.itemAt(0);
+        deviceList = mAvailableInputDevices.getDevicesFromType(AUDIO_DEVICE_IN_TELEPHONY_RX);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() no telephony RX device");
+        sp<DeviceDescriptor> rxSourceDeviceDesc = deviceList.itemAt(0);
+
+        rxSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
+        rxSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
+
+        // request to reuse existing output stream if one is already opened to reach the RX device
+        SortedVector<audio_io_handle_t> outputs =
+                                getOutputsForDevice(rxDevice, mOutputs);
+        audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+        if (output != AUDIO_IO_HANDLE_NONE) {
+            sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+            ALOG_ASSERT(!outputDesc->isDuplicated(),
+                        "updateCallRouting() RX device output is duplicated");
+            outputDesc->toAudioPortConfig(&patch.sources[1]);
+            patch.num_sources = 2;
+        }
+
+        afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, 0);
+        ALOGW_IF(status != NO_ERROR, "updateCallRouting() error %d creating RX audio patch",
+                                               status);
+        if (status == NO_ERROR) {
+            mCallRxPatch = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
+                                       &patch, mUidCached);
+            mCallRxPatch->mAfPatchHandle = afPatchHandle;
+            mCallRxPatch->mUid = mUidCached;
+        }
+        createTxPatch = true;
+    }
+    if (createTxPatch) {
+
+        struct audio_patch patch;
+        patch.num_sources = 1;
+        patch.num_sinks = 1;
+        deviceList = mAvailableInputDevices.getDevicesFromType(txDevice);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() selected device not in input device list");
+        sp<DeviceDescriptor> txSourceDeviceDesc = deviceList.itemAt(0);
+        txSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
+        deviceList = mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_TELEPHONY_TX);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() no telephony TX device");
+        sp<DeviceDescriptor> txSinkDeviceDesc = deviceList.itemAt(0);
+        txSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
+
+        SortedVector<audio_io_handle_t> outputs =
+                                getOutputsForDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX, mOutputs);
+        audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+        // request to reuse existing output stream if one is already opened to reach the TX
+        // path output device
+        if (output != AUDIO_IO_HANDLE_NONE) {
+            sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+            ALOG_ASSERT(!outputDesc->isDuplicated(),
+                        "updateCallRouting() RX device output is duplicated");
+            outputDesc->toAudioPortConfig(&patch.sources[1]);
+            patch.num_sources = 2;
+        }
+
+        afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, 0);
+        ALOGW_IF(status != NO_ERROR, "setPhoneState() error %d creating TX audio patch",
+                                               status);
+        if (status == NO_ERROR) {
+            mCallTxPatch = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
+                                       &patch, mUidCached);
+            mCallTxPatch->mAfPatchHandle = afPatchHandle;
+            mCallTxPatch->mUid = mUidCached;
+        }
+    }
+}
+
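
As the comments in updateCallRouting() describe, RX routing stays on the legacy setOutputDevice() path when the RX device is reachable from the primary HW module, and explicit RX/TX audio patches are only created otherwise (plus a TX patch whenever the TX device is off the primary module). A condensed sketch of just that decision, treating the device sets as plain bit masks and ignoring the AUDIO_DEVICE_BIT_IN masking for brevity; the struct and function names are illustrative only.

    #include <cstdint>

    struct CallRoutingPlan {
        bool useLegacyRxRouting;  // route RX via setOutputDevice() on primary output
        bool createRxPatch;       // explicit RX audio patch needed
        bool createTxPatch;       // explicit TX audio patch needed
    };

    CallRoutingPlan planCallRouting(uint32_t rxDevice, uint32_t txDevice,
                                    uint32_t primaryOutputDevices,
                                    uint32_t primaryInputDevices) {
        CallRoutingPlan plan = {};
        if (primaryOutputDevices & rxDevice) {
            // RX reachable from the primary output: legacy routing handles it,
            // and also covers TX when the TX device is on the primary module.
            plan.useLegacyRxRouting = true;
            plan.createTxPatch = (primaryInputDevices & txDevice) == 0;
        } else {
            plan.createRxPatch = true;
            plan.createTxPatch = true;
        }
        return plan;
    }
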
 void AudioPolicyManager::setPhoneState(audio_mode_t state)
 {
     ALOGV("setPhoneState() state %d", state);
-    audio_devices_t newDevice = AUDIO_DEVICE_NONE;
     if (state < 0 || state >= AUDIO_MODE_CNT) {
         ALOGW("setPhoneState() invalid state %d", state);
         return;
@@ -454,19 +587,12 @@
     }
 
     // check for device and output changes triggered by new phone state
-    newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
     checkA2dpSuspend();
     checkOutputForAllStrategies();
     updateDevicesAndOutputs();
 
     sp<AudioOutputDescriptor> hwOutputDesc = mOutputs.valueFor(mPrimaryOutput);
 
-    // force routing command to audio hardware when ending call
-    // even if no device change is needed
-    if (isStateInCall(oldState) && newDevice == AUDIO_DEVICE_NONE) {
-        newDevice = hwOutputDesc->device();
-    }
-
     int delayMs = 0;
     if (isStateInCall(state)) {
         nsecs_t sysTime = systemTime();
@@ -493,9 +619,30 @@
         }
     }
 
-    // change routing is necessary
-    setOutputDevice(mPrimaryOutput, newDevice, force, delayMs);
+    // Note that despite the fact that getNewOutputDevice() is called on the primary output,
+    // the device returned is not necessarily reachable via this output
+    audio_devices_t rxDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
+    // force routing command to audio hardware when ending call
+    // even if no device change is needed
+    if (isStateInCall(oldState) && rxDevice == AUDIO_DEVICE_NONE) {
+        rxDevice = hwOutputDesc->device();
+    }
 
+    if (state == AUDIO_MODE_IN_CALL) {
+        updateCallRouting(rxDevice, delayMs);
+    } else if (oldState == AUDIO_MODE_IN_CALL) {
+        if (mCallRxPatch != 0) {
+            mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
+            mCallRxPatch.clear();
+        }
+        if (mCallTxPatch != 0) {
+            mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
+            mCallTxPatch.clear();
+        }
+        setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
+    } else {
+        setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
+    }
     // if entering in call state, handle special case of active streams
     // pertaining to sonification strategy see handleIncallSonification()
     if (isStateInCall(state)) {
@@ -584,10 +731,16 @@
     checkA2dpSuspend();
     checkOutputForAllStrategies();
     updateDevicesAndOutputs();
+    if (mPhoneState == AUDIO_MODE_IN_CALL) {
+        audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/);
+        updateCallRouting(newDevice);
+    }
     for (size_t i = 0; i < mOutputs.size(); i++) {
         audio_io_handle_t output = mOutputs.keyAt(i);
         audio_devices_t newDevice = getNewOutputDevice(output, true /*fromCache*/);
-        setOutputDevice(output, newDevice, (newDevice != AUDIO_DEVICE_NONE));
+        if ((mPhoneState != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) {
+            setOutputDevice(output, newDevice, (newDevice != AUDIO_DEVICE_NONE));
+        }
         if (forceVolumeReeval && (newDevice != AUDIO_DEVICE_NONE)) {
             applyStreamVolumes(output, newDevice, 0, true);
         }
@@ -665,12 +818,17 @@
         ALOGE("getOutputForAttr() called with NULL audio attributes");
         return 0;
     }
-    ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s",
-            attr->usage, attr->content_type, attr->tags);
+    ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x",
+            attr->usage, attr->content_type, attr->tags, attr->flags);
 
     // TODO this is where filtering for custom policies (rerouting, dynamic sources) will go
     routing_strategy strategy = (routing_strategy) getStrategyForAttr(attr);
     audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+
+    if ((attr->flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
+        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
+    }
+
     ALOGV("getOutputForAttr() device %d, samplingRate %d, format %x, channelMask %x, flags %x",
           device, samplingRate, format, channelMask, flags);
 
@@ -709,7 +867,9 @@
             config.sample_rate = mTestSamplingRate;
             config.channel_mask = mTestChannels;
             config.format = mTestFormat;
-            config.offload_info = *offloadInfo;
+            if (offloadInfo != NULL) {
+                config.offload_info = *offloadInfo;
+            }
             status = mpClientInterface->openOutput(0,
                                                   &mTestOutputs[mCurOutput],
                                                   &config,
@@ -738,6 +898,9 @@
     if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
         flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
     }
+    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
+        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
+    }
 
     // Do not allow offloading if one non offloadable effect is enabled. This prevents from
     // creating an offloaded track and tearing it down immediately after start when audioflinger
@@ -784,7 +947,9 @@
         config.sample_rate = samplingRate;
         config.channel_mask = channelMask;
         config.format = format;
-        config.offload_info = *offloadInfo;
+        if (offloadInfo != NULL) {
+            config.offload_info = *offloadInfo;
+        }
         status = mpClientInterface->openOutput(profile->mModule->mHandle,
                                                &output,
                                                &config,
@@ -1115,6 +1280,17 @@
     config.channel_mask = channelMask;
     config.format = format;
     audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+
+    bool isSoundTrigger = false;
+    if (inputSource == AUDIO_SOURCE_HOTWORD) {
+        ssize_t index = mSoundTriggerSessions.indexOfKey(session);
+        if (index >= 0) {
+            input = mSoundTriggerSessions.valueFor(session);
+            isSoundTrigger = true;
+            ALOGV("SoundTrigger capture on session %d input %d", session, input);
+        }
+    }
+
     status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
                                                    &input,
                                                    &config,
@@ -1145,6 +1321,7 @@
     inputDesc->mChannelMask = channelMask;
     inputDesc->mDevice = device;
     inputDesc->mSessions.add(session);
+    inputDesc->mIsSoundTrigger = isSoundTrigger;
 
     addInput(input, inputDesc);
     mpClientInterface->onAudioPortListUpdate();
@@ -1190,6 +1367,9 @@
     }
 
     if (inputDesc->mRefCount == 0) {
+        if (activeInputsCount() == 0) {
+            SoundTrigger::setCaptureState(true);
+        }
         setInputDevice(input, getNewInputDevice(input), true /* force */);
 
         // Automatically enable the remote submix output when input is started.
@@ -1238,6 +1418,10 @@
         }
 
         resetInputDevice(input);
+
+        if (activeInputsCount() == 0) {
+            SoundTrigger::setCaptureState(false);
+        }
     }
     return NO_ERROR;
 }
@@ -1863,6 +2047,25 @@
     return module;
 }
 
+audio_devices_t AudioPolicyManager::availablePrimaryOutputDevices()
+{
+    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(mPrimaryOutput);
+    audio_devices_t devices = outputDesc->mProfile->mSupportedDevices.types();
+    return devices & mAvailableOutputDevices.types();
+}
+
+audio_devices_t AudioPolicyManager::availablePrimaryInputDevices()
+{
+    audio_module_handle_t primaryHandle =
+                                mOutputs.valueFor(mPrimaryOutput)->mProfile->mModule->mHandle;
+    audio_devices_t devices = AUDIO_DEVICE_NONE;
+    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
+        if (mAvailableInputDevices[i]->mModule->mHandle == primaryHandle) {
+            devices |= mAvailableInputDevices[i]->mDeviceType;
+        }
+    }
+    return devices;
+}
 
 status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch,
                                                audio_patch_handle_t *handle,
@@ -2249,6 +2452,31 @@
     }
 }
 
+status_t AudioPolicyManager::acquireSoundTriggerSession(audio_session_t *session,
+                                       audio_io_handle_t *ioHandle,
+                                       audio_devices_t *device)
+{
+    *session = (audio_session_t)mpClientInterface->newAudioUniqueId();
+    *ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId();
+    *device = getDeviceForInputSource(AUDIO_SOURCE_HOTWORD);
+
+    mSoundTriggerSessions.add(*session, *ioHandle);
+
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::releaseSoundTriggerSession(audio_session_t session)
+{
+    ssize_t index = mSoundTriggerSessions.indexOfKey(session);
+    if (index < 0) {
+        ALOGW("acquireSoundTriggerSession() session %d not registered", session);
+        return BAD_VALUE;
+    }
+
+    mSoundTriggerSessions.removeItem(session);
+    return NO_ERROR;
+}
+
 status_t AudioPolicyManager::addAudioPatch(audio_patch_handle_t handle,
                                            const sp<AudioPatch>& patch)
 {
@@ -3587,6 +3815,21 @@
         // FALL THROUGH
 
     case STRATEGY_PHONE:
+        // Force use of only devices on primary output if:
+        // - in call AND
+        //   - cannot route from voice call RX OR
+        //   - audio HAL version is < 3.0 and TX device is on the primary HW module
+        if (mPhoneState == AUDIO_MODE_IN_CALL) {
+            audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+            sp<AudioOutputDescriptor> hwOutputDesc = mOutputs.valueFor(mPrimaryOutput);
+            if (((mAvailableInputDevices.types() &
+                    AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
+                    (((txDevice & availablePrimaryInputDevices() & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+                         (hwOutputDesc->mAudioPort->mModule->mHalVersion <
+                             AUDIO_DEVICE_API_VERSION_3_0))) {
+                availableOutputDeviceTypes = availablePrimaryOutputDevices();
+            }
+        }
         // for phone strategy, we first consider the forced use and then the available devices by order
         // of priority
         switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
@@ -3616,11 +3859,11 @@
             if (device) break;
             device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
             if (device) break;
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
+            if (device) break;
             if (mPhoneState != AUDIO_MODE_IN_CALL) {
                 device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
                 if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
-                if (device) break;
                 device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
                 if (device) break;
                 device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
@@ -3657,6 +3900,8 @@
                 device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
                 if (device) break;
             }
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_LINE;
+            if (device) break;
             device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
             if (device) break;
             device = mDefaultOutputDevice->mDeviceType;
@@ -3713,6 +3958,9 @@
         if (device2 == AUDIO_DEVICE_NONE) {
             device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
         }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_LINE;
+        }
         if (device2 == AUDIO_DEVICE_NONE) {
             device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
         }
@@ -4009,7 +4257,8 @@
             inputDesc->toAudioPortConfig(&patch.sinks[0]);
             // AUDIO_SOURCE_HOTWORD is for internal use only:
             // handled as AUDIO_SOURCE_VOICE_RECOGNITION by the audio HAL
-            if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD) {
+            if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD &&
+                    !inputDesc->mIsSoundTrigger) {
                 patch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_VOICE_RECOGNITION;
             }
             patch.num_sinks = 1;
@@ -4118,19 +4367,60 @@
           device = AUDIO_DEVICE_IN_VOICE_CALL;
           break;
       }
-      // FALL THROUGH
+      break;
 
     case AUDIO_SOURCE_DEFAULT:
     case AUDIO_SOURCE_MIC:
     if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
         device = AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
-        break;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+        device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+        device = AUDIO_DEVICE_IN_USB_DEVICE;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+        device = AUDIO_DEVICE_IN_BUILTIN_MIC;
     }
-    // FALL THROUGH
+    break;
+
+    case AUDIO_SOURCE_VOICE_COMMUNICATION:
+        // Allow only use of devices on primary input if in call and HAL does not support routing
+        // to voice call path.
+        if ((mPhoneState == AUDIO_MODE_IN_CALL) &&
+                (mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
+            availableDeviceTypes = availablePrimaryInputDevices() & ~AUDIO_DEVICE_BIT_IN;
+        }
+
+        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
+        case AUDIO_POLICY_FORCE_BT_SCO:
+            // if SCO device is requested but no SCO device is available, fall back to default case
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
+                device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+                break;
+            }
+            // FALL THROUGH
+
+        default:    // FORCE_NONE
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+                device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+                device = AUDIO_DEVICE_IN_USB_DEVICE;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+            }
+            break;
+
+        case AUDIO_POLICY_FORCE_SPEAKER:
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
+                device = AUDIO_DEVICE_IN_BACK_MIC;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+            }
+            break;
+        }
+        break;
 
     case AUDIO_SOURCE_VOICE_RECOGNITION:
     case AUDIO_SOURCE_HOTWORD:
-    case AUDIO_SOURCE_VOICE_COMMUNICATION:
         if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
                 availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
             device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
@@ -4194,6 +4484,18 @@
     return 0;
 }
 
+uint32_t AudioPolicyManager::activeInputsCount() const
+{
+    uint32_t count = 0;
+    for (size_t i = 0; i < mInputs.size(); i++) {
+        const sp<AudioInputDescriptor>  desc = mInputs.valueAt(i);
+        if (desc->mRefCount > 0) {
+            count++;
+        }
+    }
+    return count;
+}
+
 
 audio_devices_t AudioPolicyManager::getDeviceForVolume(audio_devices_t device)
 {
@@ -4233,10 +4535,13 @@
         case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
         case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
             return DEVICE_CATEGORY_HEADSET;
+        case AUDIO_DEVICE_OUT_LINE:
+        case AUDIO_DEVICE_OUT_AUX_DIGITAL:
+        /*USB?  Remote submix?*/
+            return DEVICE_CATEGORY_EXT_MEDIA;
         case AUDIO_DEVICE_OUT_SPEAKER:
         case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT:
         case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER:
-        case AUDIO_DEVICE_OUT_AUX_DIGITAL:
         case AUDIO_DEVICE_OUT_USB_ACCESSORY:
         case AUDIO_DEVICE_OUT_USB_DEVICE:
         case AUDIO_DEVICE_OUT_REMOTE_SUBMIX:
@@ -4303,6 +4608,11 @@
 };
 
 const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sExtMediaSystemVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {1, -58.0f}, {20, -40.0f}, {60, -21.0f}, {100, -10.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
     AudioPolicyManager::sSpeakerMediaVolumeCurve[AudioPolicyManager::VOLCNT] = {
     {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f}
 };
@@ -4358,52 +4668,62 @@
     { // AUDIO_STREAM_VOICE_CALL
         sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVoiceVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_SYSTEM
         sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultSystemVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultSystemVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_RING
         sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_MUSIC
         sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_ALARM
         sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_NOTIFICATION
         sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_BLUETOOTH_SCO
         sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVoiceVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_ENFORCED_AUDIBLE
         sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultSystemVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     {  // AUDIO_STREAM_DTMF
         sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultSystemVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_TTS
         sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
 };
 
@@ -4879,7 +5199,7 @@
 AudioPolicyManager::AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
     : mId(0), mIoHandle(0),
       mDevice(AUDIO_DEVICE_NONE), mPatchHandle(0), mRefCount(0),
-      mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile)
+      mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile), mIsSoundTrigger(false)
 {
     if (profile != NULL) {
         mAudioPort = profile;
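
Note on the startInput()/stopInput() hunks above: SoundTrigger::setCaptureState() is only meant to fire on the 0 -> 1 and 1 -> 0 transitions of the active-input count, which is why activeInputsCount() is consulted before the refcount is bumped on start and after it is dropped on stop. The following is a minimal standalone sketch of that gating logic (plain C++ with hypothetical InputDesc/startInput/stopInput stand-ins, not the framework code):

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for the per-input descriptors kept by the policy
    // manager; only the reference count matters for this sketch.
    struct InputDesc { uint32_t refCount = 0; };

    // Same idea as AudioPolicyManager::activeInputsCount(): count every input
    // that currently has at least one client attached.
    static uint32_t activeInputsCount(const std::vector<InputDesc>& inputs) {
        uint32_t count = 0;
        for (const InputDesc& desc : inputs) {
            if (desc.refCount > 0) {
                count++;
            }
        }
        return count;
    }

    // Capture state only changes on the 0 -> 1 and 1 -> 0 transitions, so the
    // check runs before the refcount is bumped on start...
    static void startInput(std::vector<InputDesc>& inputs, size_t idx,
                           void (*setCaptureState)(bool)) {
        if (inputs[idx].refCount == 0 && activeInputsCount(inputs) == 0) {
            setCaptureState(true);   // first capture becomes active
        }
        inputs[idx].refCount++;
    }

    // ...and after it is dropped on stop.
    static void stopInput(std::vector<InputDesc>& inputs, size_t idx,
                          void (*setCaptureState)(bool)) {
        if (inputs[idx].refCount == 0) {
            return;                  // input was not started
        }
        if (--inputs[idx].refCount == 0 && activeInputsCount(inputs) == 0) {
            setCaptureState(false);  // last capture just stopped
        }
    }
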
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index e28a362..95aab65 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -172,6 +172,12 @@
         virtual status_t setAudioPortConfig(const struct audio_port_config *config);
         virtual void clearAudioPatches(uid_t uid);
 
+        virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+                                               audio_io_handle_t *ioHandle,
+                                               audio_devices_t *device);
+
+        virtual status_t releaseSoundTriggerSession(audio_session_t session);
+
 protected:
 
         enum routing_strategy {
@@ -202,6 +208,7 @@
             DEVICE_CATEGORY_HEADSET,
             DEVICE_CATEGORY_SPEAKER,
             DEVICE_CATEGORY_EARPIECE,
+            DEVICE_CATEGORY_EXT_MEDIA,
             DEVICE_CATEGORY_CNT
         };
 
@@ -408,6 +415,8 @@
         static const VolumeCurvePoint sDefaultVolumeCurve[AudioPolicyManager::VOLCNT];
         // default volume curve for media strategy
         static const VolumeCurvePoint sDefaultMediaVolumeCurve[AudioPolicyManager::VOLCNT];
+        // volume curve for non-media audio on ext media outputs (HDMI, Line, etc)
+        static const VolumeCurvePoint sExtMediaSystemVolumeCurve[AudioPolicyManager::VOLCNT];
         // volume curve for media strategy on speakers
         static const VolumeCurvePoint sSpeakerMediaVolumeCurve[AudioPolicyManager::VOLCNT];
         static const VolumeCurvePoint sSpeakerMediaVolumeCurveDrc[AudioPolicyManager::VOLCNT];
@@ -477,15 +486,18 @@
 
             status_t    dump(int fd);
 
-            audio_port_handle_t mId;
-            audio_io_handle_t mIoHandle;              // input handle
-            audio_devices_t mDevice;                    // current device this input is routed to
-            audio_patch_handle_t mPatchHandle;
-            uint32_t mRefCount;                         // number of AudioRecord clients using this output
-            uint32_t mOpenRefCount;
-            audio_source_t mInputSource;                // input source selected by application (mediarecorder.h)
-            const sp<IOProfile> mProfile;                  // I/O profile this output derives from
-            SortedVector<audio_session_t> mSessions;  // audio sessions attached to this input
+            audio_port_handle_t           mId;
+            audio_io_handle_t             mIoHandle;       // input handle
+            audio_devices_t               mDevice;         // current device this input is routed to
+            audio_patch_handle_t          mPatchHandle;
+            uint32_t                      mRefCount;       // number of AudioRecord clients using
+                                                           // this input
+            uint32_t                      mOpenRefCount;
+            audio_source_t                mInputSource;    // input source selected by application
+                                                           //(mediarecorder.h)
+            const sp<IOProfile>           mProfile;        // I/O profile this output derives from
+            SortedVector<audio_session_t> mSessions;       // audio sessions attached to this input
+            bool                          mIsSoundTrigger; // used by a soundtrigger capture
 
             virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                                    const struct audio_port_config *srcConfig = NULL) const;
@@ -569,6 +581,8 @@
         //    ignoreVirtualInputs is true.
         audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true);
 
+        uint32_t activeInputsCount() const;
+
         // initialize volume curves for each strategy and device category
         void initializeVolumeCurves();
 
@@ -713,6 +727,11 @@
         sp<AudioInputDescriptor> getInputFromId(audio_port_handle_t id) const;
         sp<HwModule> getModuleForDevice(audio_devices_t device) const;
         sp<HwModule> getModuleFromName(const char *name) const;
+        audio_devices_t availablePrimaryOutputDevices();
+        audio_devices_t availablePrimaryInputDevices();
+
+        void updateCallRouting(audio_devices_t rxDevice, int delayMs = 0);
+
         //
         // Audio policy configuration file parsing (audio_policy.conf)
         //
@@ -769,6 +788,11 @@
 
         DefaultKeyedVector<audio_patch_handle_t, sp<AudioPatch> > mAudioPatches;
 
+        DefaultKeyedVector<audio_session_t, audio_io_handle_t> mSoundTriggerSessions;
+
+        sp<AudioPatch> mCallTxPatch;
+        sp<AudioPatch> mCallRxPatch;
+
 #ifdef AUDIO_POLICY_TEST
         Mutex   mLock;
         Condition mWaitWorkCV;
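
Note on the new DEVICE_CATEGORY_EXT_MEDIA column: it routes HDMI/line outputs to a dedicated attenuation curve for system-type streams (sExtMediaSystemVolumeCurve), while media-type streams keep sDefaultMediaVolumeCurve. The framework's own volume computation is not part of this diff; the sketch below only illustrates how a four-point {index, dB} curve of this shape maps a 0-100 volume index to a dB attenuation by piecewise-linear interpolation (standalone C++, hypothetical helper names):

    #include <cstdio>

    struct VolumeCurvePoint {
        int   mIndex;          // volume index, 0..100
        float mDBAttenuation;  // attenuation in dB at that index
    };

    // Values copied from sExtMediaSystemVolumeCurve above.
    static const VolumeCurvePoint kExtMediaSystemCurve[4] = {
        {1, -58.0f}, {20, -40.0f}, {60, -21.0f}, {100, -10.0f}
    };

    // Piecewise-linear interpolation between the two surrounding curve points;
    // indices outside the curve clamp to its end points.
    static float volumeIndexToDb(const VolumeCurvePoint curve[4], int index) {
        if (index <= curve[0].mIndex) return curve[0].mDBAttenuation;
        if (index >= curve[3].mIndex) return curve[3].mDBAttenuation;
        int seg = 0;
        while (index > curve[seg + 1].mIndex) seg++;
        float span = float(curve[seg + 1].mIndex - curve[seg].mIndex);
        float frac = float(index - curve[seg].mIndex) / span;
        return curve[seg].mDBAttenuation +
               frac * (curve[seg + 1].mDBAttenuation - curve[seg].mDBAttenuation);
    }

    int main() {
        // Index 40 falls between {20, -40dB} and {60, -21dB}: expect -30.5 dB.
        printf("%.1f dB\n", volumeIndexToDb(kExtMediaSystemCurve, 40));
        return 0;
    }
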
diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h
index 97236e3..0044e7a 100644
--- a/services/audiopolicy/AudioPolicyService.h
+++ b/services/audiopolicy/AudioPolicyService.h
@@ -168,6 +168,12 @@
 
     virtual void registerClient(const sp<IAudioPolicyServiceClient>& client);
 
+    virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+                                           audio_io_handle_t *ioHandle,
+                                           audio_devices_t *device);
+
+    virtual status_t releaseSoundTriggerSession(audio_session_t session);
+
             status_t doStopOutput(audio_io_handle_t output,
                                   audio_stream_type_t stream,
                                   int session = 0);
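
Note on acquireSoundTriggerSession()/releaseSoundTriggerSession(): at the policy-manager level (see the mSoundTriggerSessions hunks earlier in this change) this is simple bookkeeping: hand out a fresh (session, I/O handle) pair, remember it, and fail release for an unknown session. A standalone model of that registry, with plain counters standing in for newAudioUniqueId() (hypothetical class, not framework code):

    #include <cstdint>
    #include <map>

    // Standalone model of the mSoundTriggerSessions bookkeeping (hypothetical
    // class name). Ids are plain counters here; the framework gets them from
    // newAudioUniqueId().
    class SoundTriggerSessionRegistry {
    public:
        void acquire(int32_t *session, int32_t *ioHandle) {
            *session = mNextId++;
            *ioHandle = mNextId++;
            mSessions[*session] = *ioHandle;   // remember the pair
        }

        bool release(int32_t session) {
            auto it = mSessions.find(session);
            if (it == mSessions.end()) {
                return false;                  // compare the BAD_VALUE path
            }
            mSessions.erase(it);
            return true;
        }

    private:
        int32_t mNextId = 1;
        std::map<int32_t, int32_t> mSessions;  // session -> capture io handle
    };
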
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 9721e13..046988e 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -810,7 +810,9 @@
             return res;
         }
     }
-    if (params.zslMode && !params.recordingHint) {
+
+    if (params.zslMode && !params.recordingHint &&
+            getRecordingStreamId() == NO_STREAM) {
         res = updateProcessorStream(mZslProcessor, params);
         if (res != OK) {
             ALOGE("%s: Camera %d: Unable to update ZSL stream: %s (%d)",
@@ -1033,6 +1035,36 @@
             return res;
         }
     }
+
+    if (mZslProcessor->getStreamId() != NO_STREAM) {
+        ALOGV("%s: Camera %d: Clearing out zsl stream before "
+                "creating recording stream", __FUNCTION__, mCameraId);
+        res = mStreamingProcessor->stopStream();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
+                    __FUNCTION__, mCameraId);
+            return res;
+        }
+        res = mDevice->waitUntilDrained();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+        }
+        res = mZslProcessor->clearZslQueue();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't clear zsl queue",
+                    __FUNCTION__, mCameraId);
+            return res;
+        }
+        res = mZslProcessor->deleteStream();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to delete zsl stream before "
+                    "record: %s (%d)", __FUNCTION__, mCameraId,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
     // Disable callbacks if they're enabled; can't record and use callbacks,
     // and we can't fail record start without stagefright asserting.
     params.previewCallbackFlags = 0;
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index ab61c44..37de610 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -489,6 +489,22 @@
                     continue;
                 }
 
+                // Make sure the candidate frame has good focus.
+                entry = frame.find(ANDROID_CONTROL_AF_STATE);
+                if (entry.count == 0) {
+                    ALOGW("%s: ZSL queue frame has no AF state field!",
+                            __FUNCTION__);
+                    continue;
+                }
+                uint8_t afState = entry.data.u8[0];
+                if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
+                        afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
+                        afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
+                    ALOGW("%s: ZSL queue frame AF state is %d is not good for capture, skip it",
+                            __FUNCTION__, afState);
+                    continue;
+                }
+
                 minTimestamp = frameTimestamp;
                 idx = j;
             }
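
Note on the ZSL candidate filter added above: a frame is only eligible for reprocessing when its AF state has settled, i.e. passively focused or locked (whether focus succeeded or not); frames still scanning or unfocused are skipped. A standalone restatement of that predicate (the enum values below are local stand-ins for the ANDROID_CONTROL_AF_STATE_* camera metadata constants):

    #include <cstdint>

    // Local stand-ins for the ANDROID_CONTROL_AF_STATE_* metadata constants;
    // the real values come from the camera metadata headers.
    enum AfState : uint8_t {
        AF_STATE_INACTIVE,
        AF_STATE_PASSIVE_SCAN,
        AF_STATE_PASSIVE_FOCUSED,
        AF_STATE_ACTIVE_SCAN,
        AF_STATE_FOCUSED_LOCKED,
        AF_STATE_NOT_FOCUSED_LOCKED,
        AF_STATE_PASSIVE_UNFOCUSED,
    };

    // A ZSL frame is usable only once focus has settled: passively focused,
    // or locked whether focus succeeded or not.
    static bool afStateUsableForZsl(uint8_t afState) {
        return afState == AF_STATE_PASSIVE_FOCUSED ||
               afState == AF_STATE_FOCUSED_LOCKED ||
               afState == AF_STATE_NOT_FOCUSED_LOCKED;
    }
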
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index b7ccaab..572ae56 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -31,10 +31,14 @@
     libbinder \
     libcutils \
     libhardware \
-    libsoundtrigger
+    libsoundtrigger \
+    libmedia
 
-#LOCAL_C_INCLUDES += \
+LOCAL_STATIC_LIBRARIES := \
+    libserviceutility
 
+LOCAL_C_INCLUDES += \
+    $(TOPDIR)frameworks/av/services/audioflinger
 
 LOCAL_MODULE:= libsoundtriggerservice
 
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 747af79..2502e0d 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -22,18 +22,19 @@
 #include <sys/types.h>
 #include <pthread.h>
 
-#include <binder/IServiceManager.h>
-#include <binder/MemoryBase.h>
-#include <binder/MemoryHeapBase.h>
+#include <system/sound_trigger.h>
 #include <cutils/atomic.h>
 #include <cutils/properties.h>
 #include <hardware/hardware.h>
+#include <media/AudioSystem.h>
 #include <utils/Errors.h>
 #include <utils/Log.h>
-
-#include "SoundTriggerHwService.h"
-#include <system/sound_trigger.h>
+#include <binder/IServiceManager.h>
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
 #include <hardware/sound_trigger.h>
+#include <ServiceUtilities.h>
+#include "SoundTriggerHwService.h"
 
 namespace android {
 
@@ -45,7 +46,9 @@
 
 SoundTriggerHwService::SoundTriggerHwService()
     : BnSoundTriggerHwService(),
-      mNextUniqueId(1)
+      mNextUniqueId(1),
+      mMemoryDealer(new MemoryDealer(1024 * 1024, "SoundTriggerHwService")),
+      mCaptureState(false)
 {
 }
 
@@ -103,6 +106,10 @@
                              uint32_t *numModules)
 {
     ALOGV("listModules");
+    if (!captureHotwordAllowed()) {
+        return PERMISSION_DENIED;
+    }
+
     AutoMutex lock(mServiceLock);
     if (numModules == NULL || (*numModules != 0 && modules == NULL)) {
         return BAD_VALUE;
@@ -120,6 +127,10 @@
                         sp<ISoundTrigger>& moduleInterface)
 {
     ALOGV("attach module %d", handle);
+    if (!captureHotwordAllowed()) {
+        return PERMISSION_DENIED;
+    }
+
     AutoMutex lock(mServiceLock);
     moduleInterface.clear();
     if (client == 0) {
@@ -135,15 +146,31 @@
     client->asBinder()->linkToDeath(module);
     moduleInterface = module;
 
+    module->setCaptureState_l(mCaptureState);
+
     return NO_ERROR;
 }
 
-void SoundTriggerHwService::detachModule(sp<Module> module) {
+status_t SoundTriggerHwService::setCaptureState(bool active)
+{
+    ALOGV("setCaptureState %d", active);
     AutoMutex lock(mServiceLock);
+    mCaptureState = active;
+    for (size_t i = 0; i < mModules.size(); i++) {
+        mModules.valueAt(i)->setCaptureState_l(active);
+    }
+    return NO_ERROR;
+}
+
+
+void SoundTriggerHwService::detachModule(sp<Module> module)
+{
     ALOGV("detachModule");
+    AutoMutex lock(mServiceLock);
     module->clearClient();
 }
 
+
 static const int kDumpLockRetries = 50;
 static const int kDumpLockSleep = 60000;
 
@@ -192,18 +219,175 @@
     if (module == NULL) {
         return;
     }
-    module->sendRecognitionEvent(event);
+    sp<SoundTriggerHwService> service = module->service().promote();
+    if (service == 0) {
+        return;
+    }
+
+    service->sendRecognitionEvent(event, module);
+}
+
+sp<IMemory> SoundTriggerHwService::prepareRecognitionEvent_l(
+                                                    struct sound_trigger_recognition_event *event)
+{
+    sp<IMemory> eventMemory;
+
+    //sanitize event
+    switch (event->type) {
+    case SOUND_MODEL_TYPE_KEYPHRASE:
+        ALOGW_IF(event->data_size != 0 && event->data_offset !=
+                    sizeof(struct sound_trigger_phrase_recognition_event),
+                    "prepareRecognitionEvent_l(): invalid data offset %u for keyphrase event type",
+                    event->data_offset);
+        event->data_offset = sizeof(struct sound_trigger_phrase_recognition_event);
+        break;
+    case SOUND_MODEL_TYPE_UNKNOWN:
+        ALOGW_IF(event->data_size != 0 && event->data_offset !=
+                    sizeof(struct sound_trigger_recognition_event),
+                    "prepareRecognitionEvent_l(): invalid data offset %u for unknown event type",
+                    event->data_offset);
+        event->data_offset = sizeof(struct sound_trigger_recognition_event);
+        break;
+    default:
+        return eventMemory;
+    }
+
+    size_t size = event->data_offset + event->data_size;
+    eventMemory = mMemoryDealer->allocate(size);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        eventMemory.clear();
+        return eventMemory;
+    }
+    memcpy(eventMemory->pointer(), event, size);
+
+    return eventMemory;
+}
+
+void SoundTriggerHwService::sendRecognitionEvent(struct sound_trigger_recognition_event *event,
+                                                 Module *module)
+{
+    AutoMutex lock(mServiceLock);
+    if (module == NULL) {
+        return;
+    }
+    sp<IMemory> eventMemory = prepareRecognitionEvent_l(event);
+    if (eventMemory == 0) {
+        return;
+    }
+    sp<Module> strongModule;
+    for (size_t i = 0; i < mModules.size(); i++) {
+        if (mModules.valueAt(i).get() == module) {
+            strongModule = mModules.valueAt(i);
+            break;
+        }
+    }
+    if (strongModule == 0) {
+        return;
+    }
+
+    sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
+                                          eventMemory, strongModule));
+}
+
+// static
+void SoundTriggerHwService::soundModelCallback(struct sound_trigger_model_event *event,
+                                               void *cookie)
+{
+    Module *module = (Module *)cookie;
+    if (module == NULL) {
+        return;
+    }
+    sp<SoundTriggerHwService> service = module->service().promote();
+    if (service == 0) {
+        return;
+    }
+
+    service->sendSoundModelEvent(event, module);
+}
+
+sp<IMemory> SoundTriggerHwService::prepareSoundModelEvent_l(struct sound_trigger_model_event *event)
+{
+    sp<IMemory> eventMemory;
+
+    size_t size = event->data_offset + event->data_size;
+    eventMemory = mMemoryDealer->allocate(size);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        eventMemory.clear();
+        return eventMemory;
+    }
+    memcpy(eventMemory->pointer(), event, size);
+
+    return eventMemory;
+}
+
+void SoundTriggerHwService::sendSoundModelEvent(struct sound_trigger_model_event *event,
+                                                Module *module)
+{
+    AutoMutex lock(mServiceLock);
+    sp<IMemory> eventMemory = prepareSoundModelEvent_l(event);
+    if (eventMemory == 0) {
+        return;
+    }
+    sp<Module> strongModule;
+    for (size_t i = 0; i < mModules.size(); i++) {
+        if (mModules.valueAt(i).get() == module) {
+            strongModule = mModules.valueAt(i);
+            break;
+        }
+    }
+    if (strongModule == 0) {
+        return;
+    }
+    sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_SOUNDMODEL,
+                                                 eventMemory, strongModule));
 }
 
 
-void SoundTriggerHwService::sendRecognitionEvent(const sp<RecognitionEvent>& event)
+sp<IMemory> SoundTriggerHwService::prepareServiceStateEvent_l(sound_trigger_service_state_t state)
 {
-    mCallbackThread->sendRecognitionEvent(event);
+    sp<IMemory> eventMemory;
+
+    size_t size = sizeof(sound_trigger_service_state_t);
+    eventMemory = mMemoryDealer->allocate(size);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        eventMemory.clear();
+        return eventMemory;
+    }
+    *((sound_trigger_service_state_t *)eventMemory->pointer()) = state;
+    return eventMemory;
 }
 
-void SoundTriggerHwService::onRecognitionEvent(const sp<RecognitionEvent>& event)
+// call with mServiceLock held
+void SoundTriggerHwService::sendServiceStateEvent_l(sound_trigger_service_state_t state,
+                                                  Module *module)
 {
-    ALOGV("onRecognitionEvent");
+    sp<IMemory> eventMemory = prepareServiceStateEvent_l(state);
+    if (eventMemory == 0) {
+        return;
+    }
+    sp<Module> strongModule;
+    for (size_t i = 0; i < mModules.size(); i++) {
+        if (mModules.valueAt(i).get() == module) {
+            strongModule = mModules.valueAt(i);
+            break;
+        }
+    }
+    if (strongModule == 0) {
+        return;
+    }
+    sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
+                                                 eventMemory, strongModule));
+}
+
+// call with mServiceLock held
+void SoundTriggerHwService::sendCallbackEvent_l(const sp<CallbackEvent>& event)
+{
+    mCallbackThread->sendCallbackEvent(event);
+}
+
+void SoundTriggerHwService::onCallbackEvent(const sp<CallbackEvent>& event)
+{
+    ALOGV("onCallbackEvent");
     sp<Module> module;
     {
         AutoMutex lock(mServiceLock);
@@ -212,15 +396,12 @@
             return;
         }
     }
-    module->onRecognitionEvent(event->mEventMemory);
-}
-
-// static
-void SoundTriggerHwService::soundModelCallback(struct sound_trigger_model_event *event __unused,
-                                               void *cookie)
-{
-    Module *module = (Module *)cookie;
-
+    module->onCallbackEvent(event);
+    {
+        AutoMutex lock(mServiceLock);
+        // clear now to execute with mServiceLock locked
+        event->mMemory.clear();
+    }
 }
 
 #undef LOG_TAG
@@ -233,7 +414,10 @@
 
 SoundTriggerHwService::CallbackThread::~CallbackThread()
 {
-    mEventQueue.clear();
+    while (!mEventQueue.isEmpty()) {
+        mEventQueue[0]->mMemory.clear();
+        mEventQueue.removeAt(0);
+    }
 }
 
 void SoundTriggerHwService::CallbackThread::onFirstRef()
@@ -244,7 +428,7 @@
 bool SoundTriggerHwService::CallbackThread::threadLoop()
 {
     while (!exitPending()) {
-        sp<RecognitionEvent> event;
+        sp<CallbackEvent> event;
         sp<SoundTriggerHwService> service;
         {
             Mutex::Autolock _l(mCallbackLock);
@@ -261,7 +445,7 @@
             service = mService.promote();
         }
         if (service != 0) {
-            service->onRecognitionEvent(event);
+            service->onCallbackEvent(event);
         }
     }
     return false;
@@ -274,25 +458,25 @@
     mCallbackCond.broadcast();
 }
 
-void SoundTriggerHwService::CallbackThread::sendRecognitionEvent(
-                        const sp<SoundTriggerHwService::RecognitionEvent>& event)
+void SoundTriggerHwService::CallbackThread::sendCallbackEvent(
+                        const sp<SoundTriggerHwService::CallbackEvent>& event)
 {
     AutoMutex lock(mCallbackLock);
     mEventQueue.add(event);
     mCallbackCond.signal();
 }
 
-SoundTriggerHwService::RecognitionEvent::RecognitionEvent(
-                                            sp<IMemory> eventMemory,
-                                            wp<Module> module)
-    : mEventMemory(eventMemory), mModule(module)
+SoundTriggerHwService::CallbackEvent::CallbackEvent(event_type type, sp<IMemory> memory,
+                                                    wp<Module> module)
+    : mType(type), mMemory(memory), mModule(module)
 {
 }
 
-SoundTriggerHwService::RecognitionEvent::~RecognitionEvent()
+SoundTriggerHwService::CallbackEvent::~CallbackEvent()
 {
 }
 
+
 #undef LOG_TAG
 #define LOG_TAG "SoundTriggerHwService::Module"
 
@@ -301,7 +485,7 @@
                                       sound_trigger_module_descriptor descriptor,
                                       const sp<ISoundTriggerClient>& client)
  : mService(service), mHwDevice(hwDevice), mDescriptor(descriptor),
-   mClient(client)
+   mClient(client), mServiceState(SOUND_TRIGGER_STATE_NO_INIT)
 {
 }
 
@@ -310,6 +494,9 @@
 
 void SoundTriggerHwService::Module::detach() {
     ALOGV("detach()");
+    if (!captureHotwordAllowed()) {
+        return;
+    }
     {
         AutoMutex lock(mLock);
         for (size_t i = 0; i < mModels.size(); i++) {
@@ -317,7 +504,6 @@
             ALOGV("detach() unloading model %d", model->mHandle);
             if (model->mState == Model::STATE_ACTIVE) {
                 mHwDevice->stop_recognition(mHwDevice, model->mHandle);
-                model->deallocateMemory();
             }
             mHwDevice->unload_sound_model(mHwDevice, model->mHandle);
         }
@@ -337,6 +523,9 @@
                                 sound_model_handle_t *handle)
 {
     ALOGV("loadSoundModel() handle");
+    if (!captureHotwordAllowed()) {
+        return PERMISSION_DENIED;
+    }
 
     if (modelMemory == 0 || modelMemory->pointer() == NULL) {
         ALOGE("loadSoundModel() modelMemory is 0 or has NULL pointer()");
@@ -351,9 +540,20 @@
                                                   SoundTriggerHwService::soundModelCallback,
                                                   this,
                                                   handle);
-    if (status == NO_ERROR) {
-        mModels.replaceValueFor(*handle, new Model(*handle));
+    if (status != NO_ERROR) {
+        return status;
     }
+    audio_session_t session;
+    audio_io_handle_t ioHandle;
+    audio_devices_t device;
+
+    status = AudioSystem::acquireSoundTriggerSession(&session, &ioHandle, &device);
+    if (status != NO_ERROR) {
+        return status;
+    }
+
+    sp<Model> model = new Model(*handle, session, ioHandle, device, sound_model->type);
+    mModels.replaceValueFor(*handle, model);
 
     return status;
 }
@@ -361,6 +561,9 @@
 status_t SoundTriggerHwService::Module::unloadSoundModel(sound_model_handle_t handle)
 {
     ALOGV("unloadSoundModel() model handle %d", handle);
+    if (!captureHotwordAllowed()) {
+        return PERMISSION_DENIED;
+    }
 
     AutoMutex lock(mLock);
     ssize_t index = mModels.indexOfKey(handle);
@@ -371,8 +574,8 @@
     mModels.removeItem(handle);
     if (model->mState == Model::STATE_ACTIVE) {
         mHwDevice->stop_recognition(mHwDevice, model->mHandle);
-        model->deallocateMemory();
     }
+    AudioSystem::releaseSoundTriggerSession(model->mCaptureSession);
     return mHwDevice->unload_sound_model(mHwDevice, handle);
 }
 
@@ -380,6 +583,9 @@
                                  const sp<IMemory>& dataMemory)
 {
     ALOGV("startRecognition() model handle %d", handle);
+    if (!captureHotwordAllowed()) {
+        return PERMISSION_DENIED;
+    }
 
     if (dataMemory != 0 && dataMemory->pointer() == NULL) {
         ALOGE("startRecognition() dataMemory is non-0 but has NULL pointer()");
@@ -387,6 +593,9 @@
 
     }
     AutoMutex lock(mLock);
+    if (mServiceState == SOUND_TRIGGER_STATE_DISABLED) {
+        return INVALID_OPERATION;
+    }
     sp<Model> model = getModel(handle);
     if (model == 0) {
         return BAD_VALUE;
@@ -399,22 +608,31 @@
     if (model->mState == Model::STATE_ACTIVE) {
         return INVALID_OPERATION;
     }
-    model->mState = Model::STATE_ACTIVE;
 
     struct sound_trigger_recognition_config *config =
             (struct sound_trigger_recognition_config *)dataMemory->pointer();
 
     //TODO: get capture handle and device from audio policy service
-    config->capture_handle = AUDIO_IO_HANDLE_NONE;
-    config->capture_device = AUDIO_DEVICE_NONE;
-    return mHwDevice->start_recognition(mHwDevice, handle, config,
+    config->capture_handle = model->mCaptureIOHandle;
+    config->capture_device = model->mCaptureDevice;
+    status_t status = mHwDevice->start_recognition(mHwDevice, handle, config,
                                         SoundTriggerHwService::recognitionCallback,
                                         this);
+
+    if (status == NO_ERROR) {
+        model->mState = Model::STATE_ACTIVE;
+        model->mConfig = *config;
+    }
+
+    return status;
 }
 
 status_t SoundTriggerHwService::Module::stopRecognition(sound_model_handle_t handle)
 {
     ALOGV("stopRecognition() model handle %d", handle);
+    if (!captureHotwordAllowed()) {
+        return PERMISSION_DENIED;
+    }
 
     AutoMutex lock(mLock);
     sp<Model> model = getModel(handle);
@@ -426,93 +644,62 @@
         return INVALID_OPERATION;
     }
     mHwDevice->stop_recognition(mHwDevice, handle);
-    model->deallocateMemory();
     model->mState = Model::STATE_IDLE;
     return NO_ERROR;
 }
 
-void SoundTriggerHwService::Module::sendRecognitionEvent(
-                                                    struct sound_trigger_recognition_event *event)
+
+void SoundTriggerHwService::Module::onCallbackEvent(const sp<CallbackEvent>& event)
 {
-    sp<SoundTriggerHwService> service;
-    sp<IMemory> eventMemory;
-    ALOGV("sendRecognitionEvent for model %d", event->model);
-    {
-        AutoMutex lock(mLock);
-        sp<Model> model = getModel(event->model);
-        if (model == 0) {
-            return;
-        }
-        if (model->mState != Model::STATE_ACTIVE) {
-            ALOGV("sendRecognitionEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
-            return;
-        }
-        if (mClient == 0) {
-            return;
-        }
-        service = mService.promote();
-        if (service == 0) {
-            return;
-        }
-
-        //sanitize event
-        switch (event->type) {
-        case SOUND_MODEL_TYPE_KEYPHRASE:
-            ALOGW_IF(event->data_offset !=
-                        sizeof(struct sound_trigger_phrase_recognition_event),
-                        "sendRecognitionEvent(): invalid data offset %u for keyphrase event type",
-                        event->data_offset);
-            event->data_offset = sizeof(struct sound_trigger_phrase_recognition_event);
-            break;
-        case SOUND_MODEL_TYPE_UNKNOWN:
-            ALOGW_IF(event->data_offset !=
-                        sizeof(struct sound_trigger_recognition_event),
-                        "sendRecognitionEvent(): invalid data offset %u for unknown event type",
-                        event->data_offset);
-            event->data_offset = sizeof(struct sound_trigger_recognition_event);
-            break;
-        default:
-                return;
-        }
-
-        size_t size = event->data_offset + event->data_size;
-        eventMemory = model->allocateMemory(size);
-        if (eventMemory == 0 || eventMemory->pointer() == NULL) {
-            return;
-        }
-        memcpy(eventMemory->pointer(), event, size);
-    }
-    service->sendRecognitionEvent(new RecognitionEvent(eventMemory, this));
-}
-
-void SoundTriggerHwService::Module::onRecognitionEvent(sp<IMemory> eventMemory)
-{
-    ALOGV("Module::onRecognitionEvent");
+    ALOGV("onCallbackEvent type %d", event->mType);
 
     AutoMutex lock(mLock);
+    sp<IMemory> eventMemory = event->mMemory;
 
     if (eventMemory == 0 || eventMemory->pointer() == NULL) {
         return;
     }
-    struct sound_trigger_recognition_event *event =
-            (struct sound_trigger_recognition_event *)eventMemory->pointer();
-
-    sp<Model> model = getModel(event->model);
-    if (model == 0) {
-        ALOGI("%s model == 0", __func__);
-        return;
-    }
-    if (model->mState != Model::STATE_ACTIVE) {
-        ALOGV("onRecognitionEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
-        return;
-    }
     if (mClient == 0) {
         ALOGI("%s mClient == 0", __func__);
         return;
     }
-    mClient->onRecognitionEvent(eventMemory);
-    model->mState = Model::STATE_IDLE;
-    model->deallocateMemory();
+
+    switch (event->mType) {
+    case CallbackEvent::TYPE_RECOGNITION: {
+        struct sound_trigger_recognition_event *recognitionEvent =
+                (struct sound_trigger_recognition_event *)eventMemory->pointer();
+
+        sp<Model> model = getModel(recognitionEvent->model);
+        if (model == 0) {
+            ALOGW("%s model == 0", __func__);
+            return;
+        }
+        if (model->mState != Model::STATE_ACTIVE) {
+            ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
+            return;
+        }
+
+        recognitionEvent->capture_session = model->mCaptureSession;
+        mClient->onRecognitionEvent(eventMemory);
+        model->mState = Model::STATE_IDLE;
+    } break;
+    case CallbackEvent::TYPE_SOUNDMODEL: {
+        struct sound_trigger_model_event *soundmodelEvent =
+                (struct sound_trigger_model_event *)eventMemory->pointer();
+
+        sp<Model> model = getModel(soundmodelEvent->model);
+        if (model == 0) {
+            ALOGW("%s model == 0", __func__);
+            return;
+        }
+        mClient->onSoundModelEvent(eventMemory);
+    } break;
+    case CallbackEvent::TYPE_SERVICE_STATE: {
+        mClient->onServiceStateChange(eventMemory);
+    } break;
+    default:
+        LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);
+    }
 }
 
 sp<SoundTriggerHwService::Model> SoundTriggerHwService::Module::getModel(
@@ -532,30 +719,80 @@
     detach();
 }
 
-
-SoundTriggerHwService::Model::Model(sound_model_handle_t handle) :
-    mHandle(handle), mState(STATE_IDLE), mInputHandle(AUDIO_IO_HANDLE_NONE),
-    mCaptureSession(AUDIO_SESSION_ALLOCATE),
-    mMemoryDealer(new MemoryDealer(sizeof(struct sound_trigger_recognition_event),
-                                   "SoundTriggerHwService::Event"))
+// Called with mServiceLock held
+void SoundTriggerHwService::Module::setCaptureState_l(bool active)
 {
+    ALOGV("Module::setCaptureState_l %d", active);
+    sp<SoundTriggerHwService> service;
+    sound_trigger_service_state_t state;
 
-}
+    Vector< sp<IMemory> > events;
+    {
+        AutoMutex lock(mLock);
+        state = (active && !mDescriptor.properties.concurrent_capture) ?
+                                        SOUND_TRIGGER_STATE_DISABLED : SOUND_TRIGGER_STATE_ENABLED;
 
+        if (state == mServiceState) {
+            return;
+        }
 
-sp<IMemory> SoundTriggerHwService::Model::allocateMemory(size_t size)
-{
-    sp<IMemory> memory;
-    if (mMemoryDealer->getMemoryHeap()->getSize() < size) {
-        mMemoryDealer = new MemoryDealer(size, "SoundTriggerHwService::Event");
+        mServiceState = state;
+
+        service = mService.promote();
+        if (service == 0) {
+            return;
+        }
+
+        if (state == SOUND_TRIGGER_STATE_ENABLED) {
+            goto exit;
+        }
+
+        for (size_t i = 0; i < mModels.size(); i++) {
+            sp<Model> model = mModels.valueAt(i);
+            if (model->mState == Model::STATE_ACTIVE) {
+                mHwDevice->stop_recognition(mHwDevice, model->mHandle);
+                // keep model in ACTIVE state so that event is processed by onCallbackEvent()
+                struct sound_trigger_phrase_recognition_event phraseEvent;
+                switch (model->mType) {
+                case SOUND_MODEL_TYPE_KEYPHRASE:
+                    phraseEvent.num_phrases = model->mConfig.num_phrases;
+                    for (size_t i = 0; i < phraseEvent.num_phrases; i++) {
+                        phraseEvent.phrase_extras[i] = model->mConfig.phrases[i];
+                    }
+                    break;
+                case SOUND_MODEL_TYPE_UNKNOWN:
+                default:
+                    break;
+                }
+                phraseEvent.common.status = RECOGNITION_STATUS_ABORT;
+                phraseEvent.common.type = model->mType;
+                phraseEvent.common.model = model->mHandle;
+                phraseEvent.common.data_size = 0;
+                sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&phraseEvent.common);
+                if (eventMemory != 0) {
+                    events.add(eventMemory);
+                }
+            }
+        }
     }
-    memory = mMemoryDealer->allocate(size);
-    return memory;
+
+    for (size_t i = 0; i < events.size(); i++) {
+        service->sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_RECOGNITION, events[i],
+                                                     this));
+    }
+
+exit:
+    service->sendServiceStateEvent_l(state, this);
 }
 
-void SoundTriggerHwService::Model::deallocateMemory()
+
+SoundTriggerHwService::Model::Model(sound_model_handle_t handle, audio_session_t session,
+                                    audio_io_handle_t ioHandle, audio_devices_t device,
+                                    sound_trigger_sound_model_type_t type) :
+    mHandle(handle), mState(STATE_IDLE), mCaptureSession(session),
+    mCaptureIOHandle(ioHandle), mCaptureDevice(device), mType(type)
 {
-    mMemoryDealer->deallocate(0);
+
 }
 
 status_t SoundTriggerHwService::Module::dump(int fd __unused,
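
Note on prepareRecognitionEvent_l()/prepareSoundModelEvent_l()/prepareServiceStateEvent_l(): they all share one layout so a single IMemory block can carry an event across binder: the fixed-size event header sits at offset 0 and the HAL's opaque payload follows at data_offset, with the block sized data_offset + data_size. A standalone sketch of that packing and unpacking (plain buffers instead of IMemory, reduced header struct):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Reduced stand-in for the sound trigger event headers: only the fields
    // relevant to the shared-memory layout are kept.
    struct EventHeader {
        uint32_t status;
        uint32_t data_size;    // bytes of opaque payload appended by the HAL
        uint32_t data_offset;  // where that payload starts inside the blob
    };

    // Sender side (compare prepareRecognitionEvent_l()): force data_offset to
    // the header size, then copy header and payload into one contiguous
    // buffer of data_offset + data_size bytes (the IMemory stand-in).
    static std::vector<uint8_t> packEvent(EventHeader header, const uint8_t *payload) {
        header.data_offset = sizeof(EventHeader);
        std::vector<uint8_t> blob(header.data_offset + header.data_size);
        std::memcpy(blob.data(), &header, sizeof(EventHeader));
        if (header.data_size != 0 && payload != nullptr) {
            std::memcpy(blob.data() + header.data_offset, payload, header.data_size);
        }
        return blob;
    }

    // Receiver side (compare Module::onCallbackEvent()): read the header at
    // offset 0, then find the payload at data_offset.
    static const uint8_t *eventPayload(const std::vector<uint8_t>& blob, uint32_t *size) {
        EventHeader header;
        std::memcpy(&header, blob.data(), sizeof(EventHeader));
        *size = header.data_size;
        return blob.data() + header.data_offset;
    }
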
diff --git a/services/soundtrigger/SoundTriggerHwService.h b/services/soundtrigger/SoundTriggerHwService.h
index 377f2a1..d05dacd 100644
--- a/services/soundtrigger/SoundTriggerHwService.h
+++ b/services/soundtrigger/SoundTriggerHwService.h
@@ -53,6 +53,8 @@
                             const sp<ISoundTriggerClient>& client,
                             sp<ISoundTrigger>& module);
 
+    virtual status_t setCaptureState(bool active);
+
     virtual status_t    onTransact(uint32_t code, const Parcel& data,
                                    Parcel* reply, uint32_t flags);
 
@@ -66,17 +68,33 @@
             STATE_ACTIVE
         };
 
-        Model(sound_model_handle_t handle);
+        Model(sound_model_handle_t handle, audio_session_t session, audio_io_handle_t ioHandle,
+              audio_devices_t device, sound_trigger_sound_model_type_t type);
         ~Model() {}
 
-        sp<IMemory> allocateMemory(size_t size);
-        void deallocateMemory();
-
         sound_model_handle_t    mHandle;
         int                     mState;
-        audio_io_handle_t       mInputHandle;
         audio_session_t         mCaptureSession;
-        sp<MemoryDealer>        mMemoryDealer;
+        audio_io_handle_t       mCaptureIOHandle;
+        audio_devices_t         mCaptureDevice;
+        sound_trigger_sound_model_type_t mType;
+        struct sound_trigger_recognition_config mConfig;
+    };
+
+    class CallbackEvent : public RefBase {
+    public:
+        typedef enum {
+            TYPE_RECOGNITION,
+            TYPE_SOUNDMODEL,
+            TYPE_SERVICE_STATE,
+        } event_type;
+        CallbackEvent(event_type type, sp<IMemory> memory, wp<Module> module);
+
+        virtual             ~CallbackEvent();
+
+        event_type mType;
+        sp<IMemory> mMemory;
+        wp<Module> mModule;
     };
 
     class Module : public virtual RefBase,
@@ -109,36 +127,29 @@
        struct sound_trigger_module_descriptor descriptor() { return mDescriptor; }
        void setClient(sp<ISoundTriggerClient> client) { mClient = client; }
        void clearClient() { mClient.clear(); }
-       sp<ISoundTriggerClient> client() { return mClient; }
+       sp<ISoundTriggerClient> client() const { return mClient; }
+       wp<SoundTriggerHwService> service() const { return mService; }
 
-       void sendRecognitionEvent(struct sound_trigger_recognition_event *event);
-       void onRecognitionEvent(sp<IMemory> eventMemory);
+       void onCallbackEvent(const sp<CallbackEvent>& event);
 
        sp<Model> getModel(sound_model_handle_t handle);
 
+       void setCaptureState_l(bool active);
+
        // IBinder::DeathRecipient implementation
        virtual void        binderDied(const wp<IBinder> &who);
 
     private:
+
         Mutex                                  mLock;
         wp<SoundTriggerHwService>              mService;
         struct sound_trigger_hw_device*        mHwDevice;
         struct sound_trigger_module_descriptor mDescriptor;
         sp<ISoundTriggerClient>                mClient;
         DefaultKeyedVector< sound_model_handle_t, sp<Model> >     mModels;
+        sound_trigger_service_state_t          mServiceState;
     }; // class Module
 
-    class RecognitionEvent : public RefBase {
-    public:
-
-        RecognitionEvent(sp<IMemory> eventMemory, wp<Module> module);
-
-        virtual             ~RecognitionEvent();
-
-        sp<IMemory> mEventMemory;
-        wp<Module> mModule;
-    };
-
     class CallbackThread : public Thread {
     public:
 
@@ -153,22 +164,30 @@
         virtual void        onFirstRef();
 
                 void        exit();
-                void        sendRecognitionEvent(const sp<RecognitionEvent>& event);
+                void        sendCallbackEvent(const sp<CallbackEvent>& event);
 
     private:
         wp<SoundTriggerHwService>   mService;
         Condition                   mCallbackCond;
         Mutex                       mCallbackLock;
-        Vector< sp<RecognitionEvent> > mEventQueue;
+        Vector< sp<CallbackEvent> > mEventQueue;
     };
 
-    void detachModule(sp<Module> module);
+           void detachModule(sp<Module> module);
 
     static void recognitionCallback(struct sound_trigger_recognition_event *event, void *cookie);
-    void sendRecognitionEvent(const sp<RecognitionEvent>& event);
-    void onRecognitionEvent(const sp<RecognitionEvent>& event);
+           sp<IMemory> prepareRecognitionEvent_l(struct sound_trigger_recognition_event *event);
+           void sendRecognitionEvent(struct sound_trigger_recognition_event *event, Module *module);
 
     static void soundModelCallback(struct sound_trigger_model_event *event, void *cookie);
+           sp<IMemory> prepareSoundModelEvent_l(struct sound_trigger_model_event *event);
+           void sendSoundModelEvent(struct sound_trigger_model_event *event, Module *module);
+
+           sp<IMemory> prepareServiceStateEvent_l(sound_trigger_service_state_t state);
+           void sendServiceStateEvent_l(sound_trigger_service_state_t state, Module *module);
+
+           void sendCallbackEvent_l(const sp<CallbackEvent>& event);
+           void onCallbackEvent(const sp<CallbackEvent>& event);
 
 private:
 
@@ -178,6 +197,8 @@
     volatile int32_t    mNextUniqueId;
     DefaultKeyedVector< sound_trigger_module_handle_t, sp<Module> >     mModules;
     sp<CallbackThread>  mCallbackThread;
+    sp<MemoryDealer>    mMemoryDealer;
+    bool                mCaptureState;
 };
 
 } // namespace android
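
Note on CallbackEvent/CallbackThread: this is a single-consumer event queue, with producers enqueuing under the callback lock and signalling, the thread draining one event at a time and dispatching it with the lock released, and exit() waking the thread so it can terminate. A minimal standalone sketch of the same pattern using std::thread and std::condition_variable (generic event type, not the framework Thread class):

    #include <condition_variable>
    #include <deque>
    #include <functional>
    #include <mutex>
    #include <thread>

    // Generic version of the CallbackThread pattern: events are queued by any
    // thread and dispatched one by one on a dedicated callback thread.
    class CallbackQueue {
    public:
        using Event = std::function<void()>;

        CallbackQueue() : mExit(false), mThread([this] { loop(); }) {}

        ~CallbackQueue() {
            {
                std::lock_guard<std::mutex> lock(mLock);
                mExit = true;            // compare CallbackThread::exit()
            }
            mCond.notify_all();
            mThread.join();
        }

        // Compare CallbackThread::sendCallbackEvent(): enqueue and signal.
        void send(Event event) {
            {
                std::lock_guard<std::mutex> lock(mLock);
                mQueue.push_back(std::move(event));
            }
            mCond.notify_one();
        }

    private:
        // Compare CallbackThread::threadLoop(): wait for work, pop one event,
        // then run it with the queue lock released.
        void loop() {
            for (;;) {
                Event event;
                {
                    std::unique_lock<std::mutex> lock(mLock);
                    mCond.wait(lock, [this] { return mExit || !mQueue.empty(); });
                    if (mExit && mQueue.empty()) {
                        return;
                    }
                    event = std::move(mQueue.front());
                    mQueue.pop_front();
                }
                event();                 // dispatch outside the lock
            }
        }

        std::mutex mLock;
        std::condition_variable mCond;
        std::deque<Event> mQueue;
        bool mExit;
        std::thread mThread;
    };
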
diff --git a/soundtrigger/ISoundTriggerClient.cpp b/soundtrigger/ISoundTriggerClient.cpp
index 1d0c0ec..b0b4428 100644
--- a/soundtrigger/ISoundTriggerClient.cpp
+++ b/soundtrigger/ISoundTriggerClient.cpp
@@ -27,6 +27,8 @@
 
 enum {
     ON_RECOGNITION_EVENT = IBinder::FIRST_CALL_TRANSACTION,
+    ON_SOUNDMODEL_EVENT,
+    ON_SERVICE_STATE_CHANGE
 };
 
 class BpSoundTriggerClient: public BpInterface<ISoundTriggerClient>
@@ -47,6 +49,25 @@
                            data,
                            &reply);
     }
+
+    virtual void onSoundModelEvent(const sp<IMemory>& eventMemory)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ISoundTriggerClient::getInterfaceDescriptor());
+        data.writeStrongBinder(eventMemory->asBinder());
+        remote()->transact(ON_SOUNDMODEL_EVENT,
+                           data,
+                           &reply);
+    }
+    virtual void onServiceStateChange(const sp<IMemory>& eventMemory)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ISoundTriggerClient::getInterfaceDescriptor());
+        data.writeStrongBinder(eventMemory->asBinder());
+        remote()->transact(ON_SERVICE_STATE_CHANGE,
+                           data,
+                           &reply);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(SoundTriggerClient,
@@ -65,6 +86,20 @@
             onRecognitionEvent(eventMemory);
             return NO_ERROR;
         } break;
+        case ON_SOUNDMODEL_EVENT: {
+            CHECK_INTERFACE(ISoundTriggerClient, data, reply);
+            sp<IMemory> eventMemory = interface_cast<IMemory>(
+                data.readStrongBinder());
+            onSoundModelEvent(eventMemory);
+            return NO_ERROR;
+        } break;
+        case ON_SERVICE_STATE_CHANGE: {
+            CHECK_INTERFACE(ISoundTriggerClient, data, reply);
+            sp<IMemory> eventMemory = interface_cast<IMemory>(
+                data.readStrongBinder());
+            onServiceStateChange(eventMemory);
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/soundtrigger/ISoundTriggerHwService.cpp b/soundtrigger/ISoundTriggerHwService.cpp
index c9a0c24..05728e9 100644
--- a/soundtrigger/ISoundTriggerHwService.cpp
+++ b/soundtrigger/ISoundTriggerHwService.cpp
@@ -37,6 +37,7 @@
 enum {
     LIST_MODULES = IBinder::FIRST_CALL_TRANSACTION,
     ATTACH,
+    SET_CAPTURE_STATE,
 };
 
 class BpSoundTriggerHwService: public BpInterface<ISoundTriggerHwService>
@@ -90,6 +91,18 @@
         return status;
     }
 
+    virtual status_t setCaptureState(bool active)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ISoundTriggerHwService::getInterfaceDescriptor());
+        data.writeInt32(active);
+        status_t status = remote()->transact(SET_CAPTURE_STATE, data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        }
+        return status;
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(SoundTriggerHwService, "android.hardware.ISoundTriggerHwService");
@@ -140,6 +153,13 @@
             }
             return NO_ERROR;
         } break;
+
+        case SET_CAPTURE_STATE: {
+            CHECK_INTERFACE(ISoundTriggerHwService, data, reply);
+            reply->writeInt32(setCaptureState((bool)data.readInt32()));
+            return NO_ERROR;
+        } break;
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/soundtrigger/SoundTrigger.cpp b/soundtrigger/SoundTrigger.cpp
index e43acd0..0015c30 100644
--- a/soundtrigger/SoundTrigger.cpp
+++ b/soundtrigger/SoundTrigger.cpp
@@ -113,6 +113,16 @@
 }
 
 
+status_t SoundTrigger::setCaptureState(bool active)
+{
+    ALOGV("setCaptureState(%d)", active);
+    const sp<ISoundTriggerHwService>& service = getSoundTriggerHwService();
+    if (service == 0) {
+        return NO_INIT;
+    }
+    return service->setCaptureState(active);
+}
+
 // SoundTrigger
 SoundTrigger::SoundTrigger(sound_trigger_module_handle_t module,
                                  const sp<SoundTriggerCallback>& callback)
@@ -192,6 +202,31 @@
     }
 }
 
+void SoundTrigger::onSoundModelEvent(const sp<IMemory>& eventMemory)
+{
+    Mutex::Autolock _l(mLock);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        return;
+    }
+
+    if (mCallback != 0) {
+        mCallback->onSoundModelEvent(
+                (struct sound_trigger_model_event *)eventMemory->pointer());
+    }
+}
+
+void SoundTrigger::onServiceStateChange(const sp<IMemory>& eventMemory)
+{
+    Mutex::Autolock _l(mLock);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        return;
+    }
+
+    if (mCallback != 0) {
+        mCallback->onServiceStateChange(
+                *((sound_trigger_service_state_t *)eventMemory->pointer()));
+    }
+}
 
 //IBinder::DeathRecipient
 void SoundTrigger::binderDied(const wp<IBinder>& who __unused) {