Merge "Revert "NuPlayer: don't feed decoder input data during flushing." " into lmp-dev
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index cf34991..dd63a23 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -149,6 +149,11 @@
     static void acquireAudioSessionId(int audioSession, pid_t pid);
     static void releaseAudioSessionId(int audioSession, pid_t pid);
 
+    // Get the HW synchronization source used for an audio session.
+    // Return a valid source or AUDIO_HW_SYNC_INVALID if an error occurs
+    // or no HW sync source is used.
+    static audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId);
+
     // types of io configuration change events received with ioConfigChanged()
     enum io_config_event {
         OUTPUT_OPENED,
@@ -309,6 +314,12 @@
     /* Set audio port configuration */
     static status_t setAudioPortConfig(const struct audio_port_config *config);
 
+
+    static status_t acquireSoundTriggerSession(audio_session_t *session,
+                                           audio_io_handle_t *ioHandle,
+                                           audio_devices_t *device);
+    static status_t releaseSoundTriggerSession(audio_session_t session);
+
     // ----------------------------------------------------------------------------
 
     class AudioPortCallback : public RefBase
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 82ec09c..31a14f0 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -235,6 +235,8 @@
     /* Set audio port configuration */
     virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
 
+    /* Get the HW synchronization source used for an audio session */
+    virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) = 0;
 };
 
 
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index abbda32..c251439 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -136,6 +136,12 @@
     virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
 
     virtual void registerClient(const sp<IAudioPolicyServiceClient>& client) = 0;
+
+    virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+                                           audio_io_handle_t *ioHandle,
+                                           audio_devices_t *device) = 0;
+
+    virtual status_t releaseSoundTriggerSession(audio_session_t session) = 0;
 };
 
 
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 7e4a1d9..a68adea 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -251,7 +251,7 @@
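+    // sbrMode: -1 lets the codec decide, 0 disables SBR,
+    // 1 selects single-rate SBR, 2 selects dual-rate SBR.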
     status_t setupAACCodec(
             bool encoder,
             int32_t numChannels, int32_t sampleRate, int32_t bitRate,
-            int32_t aacProfile, bool isADTS);
+            int32_t aacProfile, bool isADTS, int32_t sbrMode);
 
     status_t setupAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate);
 
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index f8787dd..3fb9e36 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -31,6 +31,7 @@
 namespace android {
 
 struct AMessage;
+struct AString;
 struct IMediaHTTPService;
 class String8;
 
@@ -46,7 +47,8 @@
     static sp<DataSource> CreateFromURI(
             const sp<IMediaHTTPService> &httpService,
             const char *uri,
-            const KeyedVector<String8, String8> *headers = NULL);
+            const KeyedVector<String8, String8> *headers = NULL,
+            AString *sniffedMIME = NULL);
 
     DataSource() {}
 
@@ -100,6 +102,10 @@
     virtual ~DataSource() {}
 
 private:
+    enum {
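+        // Default number of bytes to cache before instantiating the extractor
+        // when the sniffer does not report a meta-data size (see CreateFromURI).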
+        kDefaultMetaSize = 200000,
+    };
+
     static Mutex gSnifferMutex;
     static List<SnifferFunc> gSniffers;
     static bool gSniffersRegistered;
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index 3076a96..183933a 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -65,6 +65,8 @@
     virtual char* getDrmTrackInfo(size_t trackID, int *len) {
         return NULL;
     }
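+    // Optionally pass the client's UID to the extractor;
+    // the default implementation ignores it.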
+    virtual void setUID(uid_t uid) {
+    }
 
 protected:
     MediaExtractor() : mIsDrm(false) {}
diff --git a/include/media/stagefright/foundation/ABase.h b/include/media/stagefright/foundation/ABase.h
index 949d49e..72e3d87 100644
--- a/include/media/stagefright/foundation/ABase.h
+++ b/include/media/stagefright/foundation/ABase.h
@@ -18,6 +18,8 @@
 
 #define A_BASE_H_
 
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))
+
 #define DISALLOW_EVIL_CONSTRUCTORS(name) \
     name(const name &); \
     name &operator=(const name &)
diff --git a/include/soundtrigger/ISoundTriggerClient.h b/include/soundtrigger/ISoundTriggerClient.h
index 7f86d02..480429a 100644
--- a/include/soundtrigger/ISoundTriggerClient.h
+++ b/include/soundtrigger/ISoundTriggerClient.h
@@ -31,6 +31,10 @@
 
     virtual void onRecognitionEvent(const sp<IMemory>& eventMemory) = 0;
 
+    virtual void onSoundModelEvent(const sp<IMemory>& eventMemory) = 0;
+
+    virtual void onServiceStateChange(const sp<IMemory>& eventMemory) = 0;
+
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/soundtrigger/ISoundTriggerHwService.h b/include/soundtrigger/ISoundTriggerHwService.h
index 05a764a..ae0cb01 100644
--- a/include/soundtrigger/ISoundTriggerHwService.h
+++ b/include/soundtrigger/ISoundTriggerHwService.h
@@ -39,6 +39,8 @@
     virtual status_t attach(const sound_trigger_module_handle_t handle,
                                       const sp<ISoundTriggerClient>& client,
                                       sp<ISoundTrigger>& module) = 0;
+
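+    // Inform the sound trigger service whether audio capture is active
+    // (e.g. so that recognition can be adjusted while capturing).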
+    virtual status_t setCaptureState(bool active) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/soundtrigger/SoundTrigger.h b/include/soundtrigger/SoundTrigger.h
index 1f7f286..bf5e1de 100644
--- a/include/soundtrigger/SoundTrigger.h
+++ b/include/soundtrigger/SoundTrigger.h
@@ -18,6 +18,7 @@
 #define ANDROID_HARDWARE_SOUNDTRIGGER_H
 
 #include <binder/IBinder.h>
+#include <utils/threads.h>
 #include <soundtrigger/SoundTriggerCallback.h>
 #include <soundtrigger/ISoundTrigger.h>
 #include <soundtrigger/ISoundTriggerHwService.h>
@@ -32,12 +33,15 @@
                         public IBinder::DeathRecipient
 {
 public:
+
+    virtual ~SoundTrigger();
+
     static  status_t listModules(struct sound_trigger_module_descriptor *modules,
                                  uint32_t *numModules);
     static  sp<SoundTrigger> attach(const sound_trigger_module_handle_t module,
                                        const sp<SoundTriggerCallback>& callback);
 
-            virtual ~SoundTrigger();
+    static  status_t setCaptureState(bool active);
 
             void detach();
 
@@ -51,6 +55,8 @@
 
             // BpSoundTriggerClient
             virtual void onRecognitionEvent(const sp<IMemory>& eventMemory);
+            virtual void onSoundModelEvent(const sp<IMemory>& eventMemory);
+            virtual void onServiceStateChange(const sp<IMemory>& eventMemory);
 
             //IBinder::DeathRecipient
             virtual void binderDied(const wp<IBinder>& who);
diff --git a/include/soundtrigger/SoundTriggerCallback.h b/include/soundtrigger/SoundTriggerCallback.h
index 8a5ba02..b5277f2 100644
--- a/include/soundtrigger/SoundTriggerCallback.h
+++ b/include/soundtrigger/SoundTriggerCallback.h
@@ -31,6 +31,10 @@
 
     virtual void onRecognitionEvent(struct sound_trigger_recognition_event *event) = 0;
 
+    virtual void onSoundModelEvent(struct sound_trigger_model_event *event) = 0;
+
+    virtual void onServiceStateChange(sound_trigger_service_state_t state) = 0;
+
     virtual void onServiceDied() = 0;
 
 };
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 365a594..3486d21 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -450,6 +450,13 @@
     }
 }
 
+audio_hw_sync_t AudioSystem::getAudioHwSyncForSession(audio_session_t sessionId)
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return AUDIO_HW_SYNC_INVALID;
+    return af->getAudioHwSyncForSession(sessionId);
+}
+
 // ---------------------------------------------------------------------------
 
 void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who __unused)
@@ -913,6 +920,21 @@
     gAudioPortCallback = callBack;
 }
 
+status_t AudioSystem::acquireSoundTriggerSession(audio_session_t *session,
+                                       audio_io_handle_t *ioHandle,
+                                       audio_devices_t *device)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->acquireSoundTriggerSession(session, ioHandle, device);
+}
+
+status_t AudioSystem::releaseSoundTriggerSession(audio_session_t session)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->releaseSoundTriggerSession(session);
+}
 // ---------------------------------------------------------------------------
 
 void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused)
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 5331fce..346a192 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -79,7 +79,8 @@
     CREATE_AUDIO_PATCH,
     RELEASE_AUDIO_PATCH,
     LIST_AUDIO_PATCHES,
-    SET_AUDIO_PORT_CONFIG
+    SET_AUDIO_PORT_CONFIG,
+    GET_AUDIO_HW_SYNC
 };
 
 class BpAudioFlinger : public BpInterface<IAudioFlinger>
@@ -883,6 +884,17 @@
         }
         return status;
     }
+    virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+        data.writeInt32(sessionId);
+        status_t status = remote()->transact(GET_AUDIO_HW_SYNC, data, &reply);
+        if (status != NO_ERROR) {
+            return AUDIO_HW_SYNC_INVALID;
+        }
+        return (audio_hw_sync_t)reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
@@ -1345,6 +1357,11 @@
             reply->writeInt32(status);
             return NO_ERROR;
         } break;
+        case GET_AUDIO_HW_SYNC: {
+            CHECK_INTERFACE(IAudioFlinger, data, reply);
+            reply->writeInt32(getAudioHwSyncForSession((audio_session_t)data.readInt32()));
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 1593b17..b57f747 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -65,7 +65,9 @@
     LIST_AUDIO_PATCHES,
     SET_AUDIO_PORT_CONFIG,
     REGISTER_CLIENT,
-    GET_OUTPUT_FOR_ATTR
+    GET_OUTPUT_FOR_ATTR,
+    ACQUIRE_SOUNDTRIGGER_SESSION,
+    RELEASE_SOUNDTRIGGER_SESSION
 };
 
 class BpAudioPolicyService : public BpInterface<IAudioPolicyService>
@@ -563,6 +565,7 @@
         }
         return status;
     }
+
     virtual void registerClient(const sp<IAudioPolicyServiceClient>& client)
     {
         Parcel data, reply;
@@ -570,6 +573,40 @@
         data.writeStrongBinder(client->asBinder());
         remote()->transact(REGISTER_CLIENT, data, &reply);
     }
+
+    virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+                                            audio_io_handle_t *ioHandle,
+                                            audio_devices_t *device)
+    {
+        if (session == NULL || ioHandle == NULL || device == NULL) {
+            return BAD_VALUE;
+        }
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        status_t status = remote()->transact(ACQUIRE_SOUNDTRIGGER_SESSION, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = (status_t)reply.readInt32();
+        if (status == NO_ERROR) {
+            *session = (audio_session_t)reply.readInt32();
+            *ioHandle = (audio_io_handle_t)reply.readInt32();
+            *device = (audio_devices_t)reply.readInt32();
+        }
+        return status;
+    }
+
+    virtual status_t releaseSoundTriggerSession(audio_session_t session)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(session);
+        status_t status = remote()->transact(RELEASE_SOUNDTRIGGER_SESSION, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        return (status_t)reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -984,6 +1021,7 @@
             reply->writeInt32(status);
             return NO_ERROR;
         }
+
         case REGISTER_CLIENT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             sp<IAudioPolicyServiceClient> client = interface_cast<IAudioPolicyServiceClient>(
@@ -992,6 +1030,33 @@
             return NO_ERROR;
         } break;
 
+        case ACQUIRE_SOUNDTRIGGER_SESSION: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_session_t session;
+            audio_io_handle_t ioHandle;
+            audio_devices_t device;
+            status_t status = acquireSoundTriggerSession(&session, &ioHandle, &device);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->writeInt32(session);
+                reply->writeInt32(ioHandle);
+                reply->writeInt32(device);
+            }
+            return NO_ERROR;
+        } break;
+
+        case RELEASE_SOUNDTRIGGER_SESSION: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_session_t session = (audio_session_t)data.readInt32();
+            status_t status = releaseSoundTriggerSession(session);
+            reply->writeInt32(status);
+            return NO_ERROR;
+        } break;
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index a3e84df..9df3f53 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -14,6 +14,9 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
+#define LOG_TAG "GenericSource"
+
 #include "GenericSource.h"
 
 #include "AnotherPacketSource.h"
@@ -34,10 +37,6 @@
 
 NuPlayer::GenericSource::GenericSource(
         const sp<AMessage> &notify,
-        const sp<IMediaHTTPService> &httpService,
-        const char *url,
-        const KeyedVector<String8, String8> *headers,
-        bool isWidevine,
         bool uidValid,
         uid_t uid)
     : Source(notify),
@@ -45,36 +44,41 @@
       mFetchTimedTextDataGeneration(0),
       mDurationUs(0ll),
       mAudioIsVorbis(false),
-      mIsWidevine(isWidevine),
+      mIsWidevine(false),
       mUIDValid(uidValid),
       mUID(uid) {
     DataSource::RegisterDefaultSniffers();
+}
+
+status_t NuPlayer::GenericSource::init(
+        const sp<IMediaHTTPService> &httpService,
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
+    mIsWidevine = !strncasecmp(url, "widevine://", 11);
+
+    AString sniffedMIME;
 
     sp<DataSource> dataSource =
-        DataSource::CreateFromURI(httpService, url, headers);
-    CHECK(dataSource != NULL);
+        DataSource::CreateFromURI(httpService, url, headers, &sniffedMIME);
 
-    initFromDataSource(dataSource);
+    if (dataSource == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    return initFromDataSource(
+            dataSource, sniffedMIME.empty() ? NULL : sniffedMIME.c_str());
 }
 
-NuPlayer::GenericSource::GenericSource(
-        const sp<AMessage> &notify,
-        int fd, int64_t offset, int64_t length)
-    : Source(notify),
-      mFetchSubtitleDataGeneration(0),
-      mFetchTimedTextDataGeneration(0),
-      mDurationUs(0ll),
-      mAudioIsVorbis(false),
-      mIsWidevine(false) {
-    DataSource::RegisterDefaultSniffers();
-
+status_t NuPlayer::GenericSource::init(
+        int fd, int64_t offset, int64_t length) {
     sp<DataSource> dataSource = new FileSource(dup(fd), offset, length);
 
-    initFromDataSource(dataSource);
+    return initFromDataSource(dataSource, NULL);
 }
 
-void NuPlayer::GenericSource::initFromDataSource(
-        const sp<DataSource> &dataSource) {
+status_t NuPlayer::GenericSource::initFromDataSource(
+        const sp<DataSource> &dataSource,
+        const char* mime) {
     sp<MediaExtractor> extractor;
 
     if (mIsWidevine) {
@@ -88,7 +92,7 @@
                 || strcasecmp(
                     mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
             ALOGE("unsupported widevine mime: %s", mimeType.string());
-            return;
+            return UNKNOWN_ERROR;
         }
 
         sp<WVMExtractor> wvmExtractor = new WVMExtractor(dataSource);
@@ -98,10 +102,12 @@
         }
         extractor = wvmExtractor;
     } else {
-        extractor = MediaExtractor::Create(dataSource);
+        extractor = MediaExtractor::Create(dataSource, mime);
     }
 
-    CHECK(extractor != NULL);
+    if (extractor == NULL) {
+        return UNKNOWN_ERROR;
+    }
 
     sp<MetaData> fileMeta = extractor->getMetaData();
     if (fileMeta != NULL) {
@@ -134,6 +140,15 @@
             if (mVideoTrack.mSource == NULL) {
                 mVideoTrack.mIndex = i;
                 mVideoTrack.mSource = track;
+
+                // check if the source requires secure buffers
+                int32_t secure;
+                if (meta->findInt32(kKeyRequiresSecureBuffers, &secure) && secure) {
+                    mIsWidevine = true;
+                    if (mUIDValid) {
+                        extractor->setUID(mUID);
+                    }
+                }
             }
         }
 
@@ -147,6 +162,8 @@
             }
         }
     }
+
+    return OK;
 }
 
 status_t NuPlayer::GenericSource::setBuffers(bool audio, Vector<MediaBuffer *> &buffers) {
@@ -273,7 +290,7 @@
 
           int64_t timeUs, actualTimeUs;
           const bool formatChange = true;
-          sp<AMessage> latestMeta = track->mPackets->getLatestMeta();
+          sp<AMessage> latestMeta = track->mPackets->getLatestEnqueuedMeta();
           CHECK(latestMeta != NULL && latestMeta->findInt64("timeUs", &timeUs));
           readBuffer(trackType, timeUs, &actualTimeUs, formatChange);
           readBuffer(counterpartType, -1, NULL, formatChange);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 3c5f55c..76e628b 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -34,18 +34,14 @@
 class MediaBuffer;
 
 struct NuPlayer::GenericSource : public NuPlayer::Source {
-    GenericSource(
-            const sp<AMessage> &notify,
+    GenericSource(const sp<AMessage> &notify, bool uidValid, uid_t uid);
+
+    status_t init(
             const sp<IMediaHTTPService> &httpService,
             const char *url,
-            const KeyedVector<String8, String8> *headers,
-            bool isWidevine = false,
-            bool uidValid = false,
-            uid_t uid = 0);
+            const KeyedVector<String8, String8> *headers);
 
-    GenericSource(
-            const sp<AMessage> &notify,
-            int fd, int64_t offset, int64_t length);
+    status_t init(int fd, int64_t offset, int64_t length);
 
     virtual void prepareAsync();
 
@@ -101,7 +97,9 @@
     bool mUIDValid;
     uid_t mUID;
 
-    void initFromDataSource(const sp<DataSource> &dataSource);
+    status_t initFromDataSource(
+            const sp<DataSource> &dataSource,
+            const char *mime);
 
     void fetchTextData(
             uint32_t what, media_track_type type,
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 58d0138..ba6fb7d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -207,6 +207,7 @@
         const sp<IMediaHTTPService> &httpService,
         const char *url,
         const KeyedVector<String8, String8> *headers) {
+
     sp<AMessage> msg = new AMessage(kWhatSetDataSource, id());
     size_t len = strlen(url);
 
@@ -224,14 +225,21 @@
                     || strstr(url, ".sdp?"))) {
         source = new RTSPSource(
                 notify, httpService, url, headers, mUIDValid, mUID, true);
-    } else if ((!strncasecmp(url, "widevine://", 11))) {
-        source = new GenericSource(notify, httpService, url, headers,
-                true /* isWidevine */, mUIDValid, mUID);
-        mSourceFlags |= Source::FLAG_SECURE;
     } else {
-        source = new GenericSource(notify, httpService, url, headers);
-    }
+        sp<GenericSource> genericSource =
+                new GenericSource(notify, mUIDValid, mUID);
+        // Don't set FLAG_SECURE on mSourceFlags here for Widevine.
+        // The correct flags will be updated in the Source::kWhatFlagsChanged
+        // handler when the GenericSource is prepared.
 
+        status_t err = genericSource->init(httpService, url, headers);
+
+        if (err == OK) {
+            source = genericSource;
+        } else {
+            ALOGE("Failed to initialize generic source!");
+        }
+    }
     msg->setObject("source", source);
     msg->post();
 }
@@ -241,7 +249,16 @@
 
     sp<AMessage> notify = new AMessage(kWhatSourceNotify, id());
 
-    sp<Source> source = new GenericSource(notify, fd, offset, length);
+    sp<GenericSource> source =
+            new GenericSource(notify, mUIDValid, mUID);
+
+    status_t err = source->init(fd, offset, length);
+
+    if (err != OK) {
+        ALOGE("Failed to initialize generic source!");
+        source = NULL;
+    }
+
     msg->setObject("source", source);
     msg->post();
 }
@@ -350,17 +367,20 @@
 
             CHECK(mSource == NULL);
 
+            status_t err = OK;
             sp<RefBase> obj;
             CHECK(msg->findObject("source", &obj));
-
-            mSource = static_cast<Source *>(obj.get());
-
-            looper()->registerHandler(mSource);
+            if (obj != NULL) {
+                mSource = static_cast<Source *>(obj.get());
+                looper()->registerHandler(mSource);
+            } else {
+                err = UNKNOWN_ERROR;
+            }
 
             CHECK(mDriver != NULL);
             sp<NuPlayerDriver> driver = mDriver.promote();
             if (driver != NULL) {
-                driver->notifySetDataSourceCompleted(OK);
+                driver->notifySetDataSourceCompleted(err);
             }
             break;
         }
@@ -749,6 +769,15 @@
                             ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
                                     mime.c_str(), audioFormat);
 
+                            int32_t aacProfile = -1;
+                            if (audioFormat == AUDIO_FORMAT_AAC
+                                    && format->findInt32("aac-profile", &aacProfile)) {
+                                // Redefine the AAC audio format according to the AAC profile
+                                mapAACProfileToAudioFormat(
+                                        audioFormat,
+                                        aacProfile);
+                            }
+
                             flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
 
                             offloadInfo.duration_us = -1;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 5b6e59e..b81674d 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1289,16 +1289,20 @@
             err = INVALID_OPERATION;
         } else {
             int32_t isADTS, aacProfile;
+            int32_t sbrMode;
             if (!msg->findInt32("is-adts", &isADTS)) {
                 isADTS = 0;
             }
             if (!msg->findInt32("aac-profile", &aacProfile)) {
                 aacProfile = OMX_AUDIO_AACObjectNull;
             }
+            if (!msg->findInt32("aac-sbr-mode", &sbrMode)) {
+                sbrMode = -1;
+            }
 
             err = setupAACCodec(
                     encoder, numChannels, sampleRate, bitRate, aacProfile,
-                    isADTS != 0);
+                    isADTS != 0, sbrMode);
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
         err = setupAMRCodec(encoder, false /* isWAMR */, bitRate);
@@ -1460,7 +1464,7 @@
 
 status_t ACodec::setupAACCodec(
         bool encoder, int32_t numChannels, int32_t sampleRate,
-        int32_t bitRate, int32_t aacProfile, bool isADTS) {
+        int32_t bitRate, int32_t aacProfile, bool isADTS, int32_t sbrMode) {
     if (encoder && isADTS) {
         return -EINVAL;
     }
@@ -1527,6 +1531,32 @@
         profile.nAACERtools = OMX_AUDIO_AACERNone;
         profile.eAACProfile = (OMX_AUDIO_AACPROFILETYPE) aacProfile;
         profile.eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF;
+        switch (sbrMode) {
+        case 0:
+            // disable sbr
+            profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidSSBR;
+            profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidDSBR;
+            break;
+        case 1:
+            // enable single-rate sbr
+            profile.nAACtools |= OMX_AUDIO_AACToolAndroidSSBR;
+            profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidDSBR;
+            break;
+        case 2:
+            // enable dual-rate sbr
+            profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidSSBR;
+            profile.nAACtools |= OMX_AUDIO_AACToolAndroidDSBR;
+            break;
+        case -1:
+            // enable both modes -> the codec will decide which mode should be used
+            profile.nAACtools |= OMX_AUDIO_AACToolAndroidSSBR;
+            profile.nAACtools |= OMX_AUDIO_AACToolAndroidDSBR;
+            break;
+        default:
+            // unsupported sbr mode
+            return BAD_VALUE;
+        }
+
 
         err = mOMX->setParameter(
                 mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 6e0f37a..908cdca 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -13,6 +13,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DataSource"
 
 #include "include/AMRExtractor.h"
 
@@ -33,6 +35,7 @@
 
 #include <media/IMediaHTTPConnection.h>
 #include <media/IMediaHTTPService.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/DataURISource.h>
@@ -182,7 +185,12 @@
 sp<DataSource> DataSource::CreateFromURI(
         const sp<IMediaHTTPService> &httpService,
         const char *uri,
-        const KeyedVector<String8, String8> *headers) {
+        const KeyedVector<String8, String8> *headers,
+        AString *sniffedMIME) {
+    if (sniffedMIME != NULL) {
+        *sniffedMIME = "";
+    }
+
     bool isWidevine = !strncasecmp("widevine://", uri, 11);
 
     sp<DataSource> source;
@@ -202,6 +210,7 @@
         }
 
         if (httpSource->connect(uri, headers) != OK) {
+            ALOGE("Failed to connect http source!");
             return NULL;
         }
 
@@ -214,9 +223,76 @@
                         &copy, &cacheConfig, &disconnectAtHighwatermark);
             }
 
-            source = new NuCachedSource2(
+            sp<NuCachedSource2> cachedSource = new NuCachedSource2(
                     httpSource,
                     cacheConfig.isEmpty() ? NULL : cacheConfig.string());
+
+            String8 contentType = httpSource->getMIMEType();
+
+            if (strncasecmp(contentType.string(), "audio/", 6)) {
+                // We don't do this for streams that appear to be audio-only,
+                // to ensure that even low bandwidth streams start playing
+                // back fairly instantly.
+
+                // We're going to prefill the cache before trying to instantiate
+                // the extractor below, since instantiating the extractor could
+                // otherwise block on the datasource for a significant amount of
+                // time, during which we'd be unable to abort the preparation
+                // phase.
+
+                // Initially make sure we have at least 192 KB for the sniff
+                // to complete without blocking.
+                static const size_t kMinBytesForSniffing = 192 * 1024;
+
+                off64_t metaDataSize = -1ll;
+                for (;;) {
+                    status_t finalStatus;
+                    size_t cachedDataRemaining =
+                            cachedSource->approxDataRemaining(&finalStatus);
+
+                    if (finalStatus != OK || (metaDataSize >= 0
+                            && (off64_t)cachedDataRemaining >= metaDataSize)) {
+                        ALOGV("stop caching, status %d, "
+                                "metaDataSize %lld, cachedDataRemaining %zu",
+                                finalStatus, metaDataSize, cachedDataRemaining);
+                        break;
+                    }
+
+                    ALOGV("now cached %zu bytes of data", cachedDataRemaining);
+
+                    if (metaDataSize < 0
+                            && cachedDataRemaining >= kMinBytesForSniffing) {
+                        String8 tmp;
+                        float confidence;
+                        sp<AMessage> meta;
+                        if (!cachedSource->sniff(&tmp, &confidence, &meta)) {
+                            return NULL;
+                        }
+
+                        // We successfully identified the file's extractor;
+                        // remember this mime type so we don't have to sniff
+                        // it again when we call MediaExtractor::Create().
+                        if (sniffedMIME != NULL) {
+                            *sniffedMIME = tmp.string();
+                        }
+
+                        if (meta == NULL
+                                || !meta->findInt64("meta-data-size",
+                                     reinterpret_cast<int64_t*>(&metaDataSize))) {
+                            metaDataSize = kDefaultMetaSize;
+                        }
+
+                        if (metaDataSize < 0ll) {
+                            ALOGE("invalid metaDataSize = %lld bytes", metaDataSize);
+                            return NULL;
+                        }
+                    }
+
+                    usleep(200000);
+                }
+            }
+
+            source = cachedSource;
         } else {
             // We do not want that prefetching, caching, datasource wrapper
             // in the widevine:// case.
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 587e264..5f1d1c6 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -135,6 +135,11 @@
         if (meta->findInt32(kKeyIsADTS, &isADTS)) {
             msg->setInt32("is-adts", true);
         }
+
+        int32_t aacProfile = -1;
+        if (meta->findInt32(kKeyAACAOT, &aacProfile)) {
+            msg->setInt32("aac-profile", aacProfile);
+        }
     }
 
     int32_t maxInputSize;
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
index 6093621..35aa883 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 
 #include "SoftAACEncoder2.h"
+#include <OMX_AudioExt.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/hexdump.h>
@@ -44,6 +45,8 @@
       mNumChannels(1),
       mSampleRate(44100),
       mBitRate(0),
+      mSBRMode(-1),
+      mSBRRatio(0),
       mAACProfile(OMX_AUDIO_AACObjectLC),
       mSentCodecSpecificData(false),
       mInputSize(0),
@@ -156,6 +159,41 @@
             aacParams->nSampleRate = mSampleRate;
             aacParams->nFrameLength = 0;
 
+            switch (mSBRMode) {
+            case 1: // sbr on
+                switch (mSBRRatio) {
+                case 0:
+                    // set both OMX AAC tool flags
+                    aacParams->nAACtools |= OMX_AUDIO_AACToolAndroidSSBR;
+                    aacParams->nAACtools |= OMX_AUDIO_AACToolAndroidDSBR;
+                    break;
+                case 1:
+                    // set single-rate SBR active
+                    aacParams->nAACtools |= OMX_AUDIO_AACToolAndroidSSBR;
+                    aacParams->nAACtools &= ~OMX_AUDIO_AACToolAndroidDSBR;
+                    break;
+                case 2:
+                    // set dual-rate SBR active
+                    aacParams->nAACtools &= ~OMX_AUDIO_AACToolAndroidSSBR;
+                    aacParams->nAACtools |= OMX_AUDIO_AACToolAndroidDSBR;
+                    break;
+                default:
+                    ALOGE("invalid SBR ratio %d", mSBRRatio);
+                    TRESPASS();
+                }
+                break;
+            case 0:  // sbr off
+            case -1: // sbr undefined
+                aacParams->nAACtools &= ~OMX_AUDIO_AACToolAndroidSSBR;
+                aacParams->nAACtools &= ~OMX_AUDIO_AACToolAndroidDSBR;
+                break;
+            default:
+                ALOGE("invalid SBR mode %d", mSBRMode);
+                TRESPASS();
+            }
+
             return OMX_ErrorNone;
         }
 
@@ -243,6 +281,23 @@
                 mAACProfile = aacParams->eAACProfile;
             }
 
+            if (!(aacParams->nAACtools & OMX_AUDIO_AACToolAndroidSSBR)
+                    && !(aacParams->nAACtools & OMX_AUDIO_AACToolAndroidDSBR)) {
+                mSBRMode = 0;
+                mSBRRatio = 0;
+            } else if ((aacParams->nAACtools & OMX_AUDIO_AACToolAndroidSSBR)
+                    && !(aacParams->nAACtools & OMX_AUDIO_AACToolAndroidDSBR)) {
+                mSBRMode = 1;
+                mSBRRatio = 1;
+            } else if (!(aacParams->nAACtools & OMX_AUDIO_AACToolAndroidSSBR)
+                    && (aacParams->nAACtools & OMX_AUDIO_AACToolAndroidDSBR)) {
+                mSBRMode = 1;
+                mSBRRatio = 2;
+            } else {
+                mSBRMode = -1; // codec default sbr mode
+                mSBRRatio = 0;
+            }
+
             if (setAudioParams() != OK) {
                 return OMX_ErrorUndefined;
             }
@@ -305,11 +360,11 @@
 }
 
 status_t SoftAACEncoder2::setAudioParams() {
-    // We call this whenever sample rate, number of channels or bitrate change
+    // We call this whenever sample rate, number of channels, bitrate or SBR mode change
     // in response to setParameter calls.
 
-    ALOGV("setAudioParams: %u Hz, %u channels, %u bps",
-         mSampleRate, mNumChannels, mBitRate);
+    ALOGV("setAudioParams: %u Hz, %u channels, %u bps, %i sbr mode, %i sbr ratio",
+         mSampleRate, mNumChannels, mBitRate, mSBRMode, mSBRRatio);
 
     if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_AOT,
             getAOTFromProfile(mAACProfile))) {
@@ -335,6 +390,24 @@
         return UNKNOWN_ERROR;
     }
 
+    if (mSBRMode != -1 && mAACProfile == OMX_AUDIO_AACObjectELD) {
+        if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_SBR_MODE, mSBRMode)) {
+            ALOGE("Failed to set AAC encoder parameters");
+            return UNKNOWN_ERROR;
+        }
+    }
+
+    /* SBR ratio parameter configurations:
+       0: Default configuration wherein SBR ratio is configured depending on audio object type by
+          the FDK.
+       1: Downsampled SBR (default for ELD)
+       2: Dualrate SBR (default for HE-AAC)
+     */
+    if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_SBR_RATIO, mSBRRatio)) {
+        ALOGE("Failed to set AAC encoder parameters");
+        return UNKNOWN_ERROR;
+    }
+
     return OK;
 }
 
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
index 2603f4f..bce9c24 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
@@ -53,6 +53,8 @@
     OMX_U32 mNumChannels;
     OMX_U32 mSampleRate;
     OMX_U32 mBitRate;
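+    // SBR configuration: mSBRMode is -1 (codec default), 0 (off) or 1 (on);
+    // mSBRRatio is 0 (codec default), 1 (downsampled) or 2 (dual-rate).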
+    OMX_S32 mSBRMode;
+    OMX_S32 mSBRRatio;
     OMX_U32 mAACProfile;
 
     bool mSentCodecSpecificData;
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 10cdde2..8667a6b 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -57,7 +57,7 @@
       mHTTPService(httpService),
       mInPreparationPhase(true),
       mHTTPDataSource(new MediaHTTP(mHTTPService->makeHTTPConnection())),
-      mPrevBandwidthIndex(-1),
+      mCurBandwidthIndex(-1),
       mStreamMask(0),
       mNewStreamMask(0),
       mSwapMask(0),
@@ -68,13 +68,17 @@
       mReconfigurationInProgress(false),
       mSwitchInProgress(false),
       mDisconnectReplyID(0),
-      mSeekReplyID(0) {
+      mSeekReplyID(0),
+      mFirstTimeUsValid(false),
+      mFirstTimeUs(0),
+      mLastSeekTimeUs(0) {
 
     mStreams[kAudioIndex] = StreamItem("audio");
     mStreams[kVideoIndex] = StreamItem("video");
     mStreams[kSubtitleIndex] = StreamItem("subtitles");
 
     for (size_t i = 0; i < kMaxStreams; ++i) {
+        mDiscontinuities.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
         mPacketSources.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
         mPacketSources2.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
     }
@@ -109,31 +113,65 @@
         return -EWOULDBLOCK;
     }
 
+    status_t finalResult;
+    sp<AnotherPacketSource> discontinuityQueue  = mDiscontinuities.valueFor(stream);
+    if (discontinuityQueue->hasBufferAvailable(&finalResult)) {
+        discontinuityQueue->dequeueAccessUnit(accessUnit);
+        // seeking, track switching
+        sp<AMessage> extra;
+        int64_t timeUs;
+        if ((*accessUnit)->meta()->findMessage("extra", &extra)
+                && extra != NULL
+                && extra->findInt64("timeUs", &timeUs)) {
+            // seeking only
+            mLastSeekTimeUs = timeUs;
+            mDiscontinuityOffsetTimesUs.clear();
+            mDiscontinuityAbsStartTimesUs.clear();
+        }
+        return INFO_DISCONTINUITY;
+    }
+
     sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream);
 
-    status_t finalResult;
     if (!packetSource->hasBufferAvailable(&finalResult)) {
         return finalResult == OK ? -EAGAIN : finalResult;
     }
 
+    // wait for counterpart
+    sp<AnotherPacketSource> otherSource;
+    if (stream == STREAMTYPE_AUDIO && (mStreamMask & STREAMTYPE_VIDEO)) {
+        otherSource = mPacketSources.valueFor(STREAMTYPE_VIDEO);
+    } else if (stream == STREAMTYPE_VIDEO && (mStreamMask & STREAMTYPE_AUDIO)) {
+        otherSource = mPacketSources.valueFor(STREAMTYPE_AUDIO);
+    }
+    if (otherSource != NULL && !otherSource->hasBufferAvailable(&finalResult)) {
+        return finalResult == OK ? -EAGAIN : finalResult;
+    }
+
     status_t err = packetSource->dequeueAccessUnit(accessUnit);
 
+    size_t streamIdx;
     const char *streamStr;
     switch (stream) {
         case STREAMTYPE_AUDIO:
+            streamIdx = kAudioIndex;
             streamStr = "audio";
             break;
         case STREAMTYPE_VIDEO:
+            streamIdx = kVideoIndex;
             streamStr = "video";
             break;
         case STREAMTYPE_SUBTITLES:
+            streamIdx = kSubtitleIndex;
             streamStr = "subs";
             break;
         default:
             TRESPASS();
     }
 
+    StreamItem& strm = mStreams[streamIdx];
     if (err == INFO_DISCONTINUITY) {
+        // adaptive streaming, discontinuities in the playlist
         int32_t type;
         CHECK((*accessUnit)->meta()->findInt32("discontinuity", &type));
 
@@ -148,10 +186,7 @@
               extra == NULL ? "NULL" : extra->debugString().c_str());
 
         int32_t swap;
-        if (type == ATSParser::DISCONTINUITY_FORMATCHANGE
-                && (*accessUnit)->meta()->findInt32("swapPacketSource", &swap)
-                && swap) {
-
+        if ((*accessUnit)->meta()->findInt32("swapPacketSource", &swap) && swap) {
             int32_t switchGeneration;
             CHECK((*accessUnit)->meta()->findInt32("switchGeneration", &switchGeneration));
             {
@@ -164,13 +199,67 @@
                     msg->post();
                 }
             }
+        } else {
+            size_t seq = strm.mCurDiscontinuitySeq;
+            int64_t offsetTimeUs;
+            if (mDiscontinuityOffsetTimesUs.indexOfKey(seq) >= 0) {
+                offsetTimeUs = mDiscontinuityOffsetTimesUs.valueFor(seq);
+            } else {
+                offsetTimeUs = 0;
+            }
+
+            seq += 1;
+            if (mDiscontinuityAbsStartTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0) {
+                int64_t firstTimeUs;
+                firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(strm.mCurDiscontinuitySeq);
+                offsetTimeUs += strm.mLastDequeuedTimeUs - firstTimeUs;
+                offsetTimeUs += strm.mLastSampleDurationUs;
+            } else {
+                offsetTimeUs += strm.mLastSampleDurationUs;
+            }
+
+            mDiscontinuityOffsetTimesUs.add(seq, offsetTimeUs);
         }
     } else if (err == OK) {
+
         if (stream == STREAMTYPE_AUDIO || stream == STREAMTYPE_VIDEO) {
             int64_t timeUs;
+            int32_t discontinuitySeq = 0;
             CHECK((*accessUnit)->meta()->findInt64("timeUs",  &timeUs));
-            ALOGV("[%s] read buffer at time %" PRId64 " us", streamStr, timeUs);
+            (*accessUnit)->meta()->findInt32("discontinuitySeq", &discontinuitySeq);
+            strm.mCurDiscontinuitySeq = discontinuitySeq;
 
+            int32_t discard = 0;
+            int64_t firstTimeUs;
+            if (mDiscontinuityAbsStartTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0) {
+                int64_t durUs; // approximate sample duration
+                if (timeUs > strm.mLastDequeuedTimeUs) {
+                    durUs = timeUs - strm.mLastDequeuedTimeUs;
+                } else {
+                    durUs = strm.mLastDequeuedTimeUs - timeUs;
+                }
+                strm.mLastSampleDurationUs = durUs;
+                firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(strm.mCurDiscontinuitySeq);
+            } else if ((*accessUnit)->meta()->findInt32("discard", &discard) && discard) {
+                firstTimeUs = timeUs;
+            } else {
+                mDiscontinuityAbsStartTimesUs.add(strm.mCurDiscontinuitySeq, timeUs);
+                firstTimeUs = timeUs;
+            }
+
+            strm.mLastDequeuedTimeUs = timeUs;
+            if (timeUs >= firstTimeUs) {
+                timeUs -= firstTimeUs;
+            } else {
+                timeUs = 0;
+            }
+            timeUs += mLastSeekTimeUs;
+            if (mDiscontinuityOffsetTimesUs.indexOfKey(discontinuitySeq) >= 0) {
+                timeUs += mDiscontinuityOffsetTimesUs.valueFor(discontinuitySeq);
+            }
+
+            ALOGV("[%s] read buffer at time %" PRId64 " us", streamStr, timeUs);
+            (*accessUnit)->meta()->setInt64("timeUs",  timeUs);
             mLastDequeuedTimeUs = timeUs;
             mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
         } else if (stream == STREAMTYPE_SUBTITLES) {
@@ -289,7 +378,9 @@
                             break;
                         }
 
-                        tryToFinishBandwidthSwitch();
+                        if (mSwitchInProgress) {
+                            tryToFinishBandwidthSwitch();
+                        }
                     }
 
                     if (mContinuation != NULL) {
@@ -538,8 +629,9 @@
         mBandwidthItems.push(item);
     }
 
+    mPlaylist->pickRandomMediaItems();
     changeConfiguration(
-            0ll /* timeUs */, initialBandwidthIndex, true /* pickTrack */);
+            0ll /* timeUs */, initialBandwidthIndex, false /* pickTrack */);
 }
 
 void LiveSession::finishDisconnect() {
@@ -847,20 +939,20 @@
     // to lowest)
     const size_t kMinIndex = 0;
 
-    static ssize_t mPrevBandwidthIndex = -1;
+    static ssize_t mCurBandwidthIndex = -1;
 
     size_t index;
-    if (mPrevBandwidthIndex < 0) {
+    if (mCurBandwidthIndex < 0) {
         index = kMinIndex;
     } else if (uniformRand() < 0.5) {
-        index = (size_t)mPrevBandwidthIndex;
+        index = (size_t)mCurBandwidthIndex;
     } else {
-        index = mPrevBandwidthIndex + 1;
+        index = mCurBandwidthIndex + 1;
         if (index == mBandwidthItems.size()) {
             index = kMinIndex;
         }
     }
-    mPrevBandwidthIndex = index;
+    mCurBandwidthIndex = index;
 #elif 0
     // Pick the highest bandwidth stream below or equal to 1.2 Mbit/sec
 
@@ -937,7 +1029,10 @@
 status_t LiveSession::selectTrack(size_t index, bool select) {
     status_t err = mPlaylist->selectTrack(index, select);
     if (err == OK) {
-        (new AMessage(kWhatChangeConfiguration, id()))->post();
+        sp<AMessage> msg = new AMessage(kWhatChangeConfiguration, id());
+        msg->setInt32("bandwidthIndex", mCurBandwidthIndex);
+        msg->setInt32("pickTrack", select);
+        msg->post();
     }
     return err;
 }
@@ -964,15 +1059,11 @@
     CHECK(!mReconfigurationInProgress);
     mReconfigurationInProgress = true;
 
-    mPrevBandwidthIndex = bandwidthIndex;
+    mCurBandwidthIndex = bandwidthIndex;
 
     ALOGV("changeConfiguration => timeUs:%" PRId64 " us, bwIndex:%zu, pickTrack:%d",
           timeUs, bandwidthIndex, pickTrack);
 
-    if (pickTrack) {
-        mPlaylist->pickRandomMediaItems();
-    }
-
     CHECK_LT(bandwidthIndex, mBandwidthItems.size());
     const BandwidthItem &item = mBandwidthItems.itemAt(bandwidthIndex);
 
@@ -995,14 +1086,15 @@
 
         // If we're seeking all current fetchers are discarded.
         if (timeUs < 0ll) {
-            // delay fetcher removal
-            discardFetcher = false;
+            // delay fetcher removal if not picking tracks
+            discardFetcher = pickTrack;
 
             for (size_t j = 0; j < kMaxStreams; ++j) {
                 StreamType type = indexToType(j);
                 if ((streamMask & type) && uri == URIs[j]) {
                     resumeMask |= type;
                     streamMask &= ~type;
+                    discardFetcher = false;
                 }
             }
         }
@@ -1016,16 +1108,17 @@
 
     sp<AMessage> msg;
     if (timeUs < 0ll) {
-        // skip onChangeConfiguration2 (decoder destruction) if switching.
+        // skip onChangeConfiguration2 (decoder destruction) if not seeking.
         msg = new AMessage(kWhatChangeConfiguration3, id());
     } else {
         msg = new AMessage(kWhatChangeConfiguration2, id());
     }
     msg->setInt32("streamMask", streamMask);
     msg->setInt32("resumeMask", resumeMask);
+    msg->setInt32("pickTrack", pickTrack);
     msg->setInt64("timeUs", timeUs);
     for (size_t i = 0; i < kMaxStreams; ++i) {
-        if (streamMask & indexToType(i)) {
+        if ((streamMask | resumeMask) & indexToType(i)) {
             msg->setString(mStreams[i].uriKey().c_str(), URIs[i].c_str());
         }
     }
@@ -1049,7 +1142,10 @@
 
 void LiveSession::onChangeConfiguration(const sp<AMessage> &msg) {
     if (!mReconfigurationInProgress) {
-        changeConfiguration(-1ll /* timeUs */, getBandwidthIndex());
+        int32_t pickTrack = 0, bandwidthIndex = mCurBandwidthIndex;
+        msg->findInt32("pickTrack", &pickTrack);
+        msg->findInt32("bandwidthIndex", &bandwidthIndex);
+        changeConfiguration(-1ll /* timeUs */, bandwidthIndex, pickTrack);
     } else {
         msg->post(1000000ll); // retry in 1 sec
     }
@@ -1060,8 +1156,14 @@
 
     // All fetchers are either suspended or have been removed now.
 
-    uint32_t streamMask;
+    uint32_t streamMask, resumeMask;
     CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask));
+    CHECK(msg->findInt32("resumeMask", (int32_t *)&resumeMask));
+
+    // Currently onChangeConfiguration2 is only called for seeking;
+    // remove the following CHECK if using it elsewhere.
+    CHECK_EQ(resumeMask, 0);
+    streamMask |= resumeMask;
 
     AString URIs[kMaxStreams];
     for (size_t i = 0; i < kMaxStreams; ++i) {
@@ -1125,16 +1227,21 @@
     }
 
     int64_t timeUs;
+    int32_t pickTrack;
     bool switching = false;
     CHECK(msg->findInt64("timeUs", &timeUs));
+    CHECK(msg->findInt32("pickTrack", &pickTrack));
 
     if (timeUs < 0ll) {
-        timeUs = mLastDequeuedTimeUs;
-        switching = true;
+        if (!pickTrack) {
+            switching = true;
+        }
+        mRealTimeBaseUs = ALooper::GetNowUs() - mLastDequeuedTimeUs;
+    } else {
+        mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
     }
-    mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
 
-    mNewStreamMask = streamMask;
+    mNewStreamMask = streamMask | resumeMask;
 
     // Of all existing fetchers:
     // * Resume fetchers that are still needed and assign them original packet sources.
@@ -1147,6 +1254,16 @@
         for (size_t j = 0; j < kMaxStreams; ++j) {
             if ((resumeMask & indexToType(j)) && uri == mStreams[j].mUri) {
                 sources[j] = mPacketSources.valueFor(indexToType(j));
+
+                if (j != kSubtitleIndex) {
+                    ALOGV("queueing dummy discontinuity for stream type %d", indexToType(j));
+                    sp<AnotherPacketSource> discontinuityQueue;
+                    discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
+                    discontinuityQueue->queueDiscontinuity(
+                            ATSParser::DISCONTINUITY_NONE,
+                            NULL,
+                            true);
+                }
             }
         }
 
@@ -1180,7 +1297,9 @@
         CHECK(fetcher != NULL);
 
         int32_t latestSeq = -1;
-        int64_t latestTimeUs = 0ll;
+        int64_t startTimeUs = -1;
+        int64_t segmentStartTimeUs = -1ll;
+        int32_t discontinuitySeq = -1;
         sp<AnotherPacketSource> sources[kMaxStreams];
 
         // TRICKY: looping from i as earlier streams are already removed from streamMask
@@ -1188,29 +1307,65 @@
             if ((streamMask & indexToType(j)) && uri == mStreams[j].mUri) {
                 sources[j] = mPacketSources.valueFor(indexToType(j));
 
-                if (!switching) {
+                if (timeUs >= 0) {
                     sources[j]->clear();
+                    startTimeUs = timeUs;
+
+                    sp<AnotherPacketSource> discontinuityQueue;
+                    sp<AMessage> extra = new AMessage;
+                    extra->setInt64("timeUs", timeUs);
+                    discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
+                    discontinuityQueue->queueDiscontinuity(
+                            ATSParser::DISCONTINUITY_SEEK, extra, true);
                 } else {
-                    int32_t type, seq;
-                    int64_t srcTimeUs;
-                    sp<AMessage> meta = sources[j]->getLatestMeta();
+                    int32_t type;
+                    int64_t srcSegmentStartTimeUs;
+                    sp<AMessage> meta;
+                    if (pickTrack) {
+                        // selecting
+                        meta = sources[j]->getLatestDequeuedMeta();
+                    } else {
+                        // adapting
+                        meta = sources[j]->getLatestEnqueuedMeta();
+                    }
 
                     if (meta != NULL && !meta->findInt32("discontinuity", &type)) {
-                        CHECK(meta->findInt32("seq", &seq));
-                        if (seq > latestSeq) {
-                            latestSeq = seq;
+                        int64_t tmpUs;
+                        CHECK(meta->findInt64("timeUs", &tmpUs));
+                        if (startTimeUs < 0 || tmpUs < startTimeUs) {
+                            startTimeUs = tmpUs;
                         }
-                        CHECK(meta->findInt64("timeUs", &srcTimeUs));
-                        if (srcTimeUs > latestTimeUs) {
-                            latestTimeUs = srcTimeUs;
+
+                        CHECK(meta->findInt64("segmentStartTimeUs", &tmpUs));
+                        if (segmentStartTimeUs < 0 || tmpUs < segmentStartTimeUs) {
+                            segmentStartTimeUs = tmpUs;
+                        }
+
+                        int32_t seq;
+                        CHECK(meta->findInt32("discontinuitySeq", &seq));
+                        if (discontinuitySeq < 0 || seq < discontinuitySeq) {
+                            discontinuitySeq = seq;
                         }
                     }
 
-                    sources[j] = mPacketSources2.valueFor(indexToType(j));
-                    sources[j]->clear();
-                    uint32_t extraStreams = mNewStreamMask & (~mStreamMask);
-                    if (extraStreams & indexToType(j)) {
-                        sources[j]->queueAccessUnit(createFormatChangeBuffer(/* swap = */ false));
+                    if (pickTrack) {
+                        // selecting track, queue discontinuities before content
+                        sources[j]->clear();
+                        if (j == kSubtitleIndex) {
+                            break;
+                        }
+                        sp<AnotherPacketSource> discontinuityQueue;
+                        discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
+                        discontinuityQueue->queueDiscontinuity(
+                                ATSParser::DISCONTINUITY_FORMATCHANGE, NULL, true);
+                    } else {
+                        // adapting, queue discontinuities after resume
+                        sources[j] = mPacketSources2.valueFor(indexToType(j));
+                        sources[j]->clear();
+                        uint32_t extraStreams = mNewStreamMask & (~mStreamMask);
+                        if (extraStreams & indexToType(j)) {
+                            sources[j]->queueAccessUnit(createFormatChangeBuffer(/*swap*/ false));
+                        }
                     }
                 }
 
@@ -1222,9 +1377,10 @@
                 sources[kAudioIndex],
                 sources[kVideoIndex],
                 sources[kSubtitleIndex],
-                timeUs,
-                latestTimeUs /* min start time(us) */,
-                latestSeq >= 0 ? latestSeq + 1 : -1 /* starting sequence number hint */ );
+                startTimeUs < 0 ? mLastSeekTimeUs : startTimeUs,
+                segmentStartTimeUs,
+                discontinuitySeq,
+                switching);
     }
 
     // All fetchers have now been started, the configuration change
@@ -1236,6 +1392,7 @@
     mReconfigurationInProgress = false;
     if (switching) {
         mSwitchInProgress = true;
+        mSwapMask = streamMask;
     } else {
         mStreamMask = mNewStreamMask;
     }
@@ -1254,8 +1411,8 @@
 
     int32_t stream;
     CHECK(msg->findInt32("stream", &stream));
-    mSwapMask |= stream;
-    if (mSwapMask != mStreamMask) {
+    mSwapMask &= ~stream;
+    if (mSwapMask != 0) {
         return;
     }
 
@@ -1271,9 +1428,12 @@
 }
 
 // Mark switch done when:
-//   1. all old buffers are swapped out, AND
-//   2. all old fetchers are removed.
+//   1. all old buffers are swapped out
 void LiveSession::tryToFinishBandwidthSwitch() {
+    if (!mSwitchInProgress) {
+        return;
+    }
+
     bool needToRemoveFetchers = false;
     for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
         if (mFetcherInfos.valueAt(i).mToBeRemoved) {
@@ -1281,10 +1441,11 @@
             break;
         }
     }
-    if (!needToRemoveFetchers && mSwapMask == mStreamMask) {
+
+    if (!needToRemoveFetchers && mSwapMask == 0) {
+        ALOGI("mSwitchInProgress = false");
         mStreamMask = mNewStreamMask;
         mSwitchInProgress = false;
-        mSwapMask = 0;
     }
 }
 
@@ -1310,13 +1471,13 @@
         return false;
     }
 
-    if (mPrevBandwidthIndex < 0) {
+    if (mCurBandwidthIndex < 0) {
         return true;
     }
 
-    if (bandwidthIndex == (size_t)mPrevBandwidthIndex) {
+    if (bandwidthIndex == (size_t)mCurBandwidthIndex) {
         return false;
-    } else if (bandwidthIndex > (size_t)mPrevBandwidthIndex) {
+    } else if (bandwidthIndex > (size_t)mCurBandwidthIndex) {
         return canSwitchUp();
     } else {
         return true;
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index ed3818f..5423f0f 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -125,8 +125,19 @@
     struct StreamItem {
         const char *mType;
         AString mUri;
-        StreamItem() : mType("") {}
-        StreamItem(const char *type) : mType(type) {}
+        size_t mCurDiscontinuitySeq;
+        int64_t mLastDequeuedTimeUs;
+        int64_t mLastSampleDurationUs;
+        StreamItem()
+            : mType(""),
+              mCurDiscontinuitySeq(0),
+              mLastDequeuedTimeUs(0),
+              mLastSampleDurationUs(0) {}
+        StreamItem(const char *type)
+            : mType(type),
+              mCurDiscontinuitySeq(0),
+              mLastDequeuedTimeUs(0),
+              mLastSampleDurationUs(0) {}
         AString uriKey() {
             AString key(mType);
             key.append("URI");
@@ -147,7 +158,7 @@
     AString mMasterURL;
 
     Vector<BandwidthItem> mBandwidthItems;
-    ssize_t mPrevBandwidthIndex;
+    ssize_t mCurBandwidthIndex;
 
     sp<M3UParser> mPlaylist;
 
@@ -163,6 +174,7 @@
     // we use this to track reconfiguration progress.
     uint32_t mSwapMask;
 
+    KeyedVector<StreamType, sp<AnotherPacketSource> > mDiscontinuities;
     KeyedVector<StreamType, sp<AnotherPacketSource> > mPacketSources;
     // A second set of packet sources that buffer content for the variant we're switching to.
     KeyedVector<StreamType, sp<AnotherPacketSource> > mPacketSources2;
@@ -187,6 +199,12 @@
     uint32_t mDisconnectReplyID;
     uint32_t mSeekReplyID;
 
+    bool mFirstTimeUsValid;
+    int64_t mFirstTimeUs;
+    int64_t mLastSeekTimeUs;
+    KeyedVector<size_t, int64_t> mDiscontinuityAbsStartTimesUs;
+    KeyedVector<size_t, int64_t> mDiscontinuityOffsetTimesUs;
+
     sp<PlaylistFetcher> addFetcher(const char *uri);
 
     void onConnect(const sp<AMessage> &msg);
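The LiveSession changes above rework the swap-mask bookkeeping: mSwapMask is now seeded with the set of streams being switched (mSwapMask = streamMask) and cleared bit by bit as each stream reports its swap, and the bandwidth switch is considered finished once the mask reaches zero. A minimal sketch of that state machine in plain C++; SwitchState, startSwitch and onSwapDone are illustrative stand-ins for the LiveSession members and handlers, not names from the patch.

#include <cstdint>

// Illustrative model of the new mSwapMask lifecycle (not part of the patch).
struct SwitchState {
    uint32_t swapMask = 0;          // streams still waiting to be swapped
    bool switchInProgress = false;

    // Corresponds to seeding mSwapMask when the reconfiguration starts switching.
    void startSwitch(uint32_t streamMask) {
        swapMask = streamMask;
        switchInProgress = true;
    }

    // Corresponds to handling one swap notification for a single stream bit.
    void onSwapDone(uint32_t streamBit) {
        swapMask &= ~streamBit;
        if (swapMask == 0) {
            switchInProgress = false;   // all old buffers swapped out
        }
    }
};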
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index efd852c..1651dee 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -157,8 +157,8 @@
 }
 
 status_t M3UParser::MediaGroup::selectTrack(size_t index, bool select) {
-    if (mType != TYPE_SUBS) {
-        ALOGE("only select subtitile tracks for now!");
+    if (mType != TYPE_SUBS && mType != TYPE_AUDIO) {
+        ALOGE("only select subtitle/audio tracks for now!");
         return INVALID_OPERATION;
     }
 
@@ -246,6 +246,7 @@
       mIsVariantPlaylist(false),
       mIsComplete(false),
       mIsEvent(false),
+      mDiscontinuitySeq(0),
       mSelectedIndex(-1) {
     mInitCheck = parse(data, size);
 }
@@ -273,6 +274,10 @@
     return mIsEvent;
 }
 
+size_t M3UParser::getDiscontinuitySeq() const {
+    return mDiscontinuitySeq;
+}
+
 sp<AMessage> M3UParser::meta() {
     return mMeta;
 }
@@ -567,6 +572,12 @@
                 }
             } else if (line.startsWith("#EXT-X-MEDIA")) {
                 err = parseMedia(line);
+            } else if (line.startsWith("#EXT-X-DISCONTINUITY-SEQUENCE")) {
+                size_t seq;
+                err = parseDiscontinuitySequence(line, &seq);
+                if (err == OK) {
+                    mDiscontinuitySeq = seq;
+                }
             }
 
             if (err != OK) {
@@ -1110,6 +1121,30 @@
 }
 
 // static
+status_t M3UParser::parseDiscontinuitySequence(const AString &line, size_t *seq) {
+    ssize_t colonPos = line.find(":");
+
+    if (colonPos < 0) {
+        return ERROR_MALFORMED;
+    }
+
+    int32_t x;
+    status_t err = ParseInt32(line.c_str() + colonPos + 1, &x);
+    if (err != OK) {
+        return err;
+    }
+
+    if (x < 0) {
+        return ERROR_MALFORMED;
+    }
+
+    if (seq) {
+        *seq = x;
+    }
+    return OK;
+}
+
+// static
 status_t M3UParser::ParseInt32(const char *s, int32_t *x) {
     char *end;
     long lval = strtol(s, &end, 10);
diff --git a/media/libstagefright/httplive/M3UParser.h b/media/libstagefright/httplive/M3UParser.h
index fe9fb9d..d588afe 100644
--- a/media/libstagefright/httplive/M3UParser.h
+++ b/media/libstagefright/httplive/M3UParser.h
@@ -34,6 +34,7 @@
     bool isVariantPlaylist() const;
     bool isComplete() const;
     bool isEvent() const;
+    size_t getDiscontinuitySeq() const;
 
     sp<AMessage> meta();
 
@@ -66,6 +67,7 @@
     bool mIsVariantPlaylist;
     bool mIsComplete;
     bool mIsEvent;
+    size_t mDiscontinuitySeq;
 
     sp<AMessage> mMeta;
     Vector<Item> mItems;
@@ -94,6 +96,8 @@
 
     status_t parseMedia(const AString &line);
 
+    static status_t parseDiscontinuitySequence(const AString &line, size_t *seq);
+
     static status_t ParseInt32(const char *s, int32_t *x);
     static status_t ParseDouble(const char *s, double *x);
 
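The M3UParser additions above teach the parser the #EXT-X-DISCONTINUITY-SEQUENCE tag: the integer after the colon becomes the playlist's base discontinuity sequence, and a missing colon, missing digits or a negative value is rejected as malformed. A standalone sketch of that parse, using std::string and strtol in place of AString and ParseInt32; parseDiscontinuitySeq is an illustrative name, not the patch's function.

#include <cstdlib>
#include <string>

// Returns true and stores the sequence on success, mirroring the checks in
// parseDiscontinuitySequence() above (illustrative helper, not part of the patch).
static bool parseDiscontinuitySeq(const std::string &line, size_t *seq) {
    size_t colonPos = line.find(':');
    if (colonPos == std::string::npos) {
        return false;                               // malformed: no ":" separator
    }
    const char *start = line.c_str() + colonPos + 1;
    char *end = nullptr;
    long val = strtol(start, &end, 10);
    if (end == start || val < 0) {
        return false;                               // no digits, or negative value
    }
    if (seq != nullptr) {
        *seq = static_cast<size_t>(val);
    }
    return true;                                    // e.g. "...SEQUENCE:3" yields 3
}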
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 10437c9..80cb2d0 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -49,7 +49,7 @@
 // static
 const int64_t PlaylistFetcher::kMinBufferedDurationUs = 10000000ll;
 const int64_t PlaylistFetcher::kMaxMonitorDelayUs = 3000000ll;
-const int32_t PlaylistFetcher::kDownloadBlockSize = 192;
+const int32_t PlaylistFetcher::kDownloadBlockSize = 2048;
 const int32_t PlaylistFetcher::kNumSkipFrames = 10;
 
 PlaylistFetcher::PlaylistFetcher(
@@ -62,19 +62,21 @@
       mURI(uri),
       mStreamTypeMask(0),
       mStartTimeUs(-1ll),
-      mMinStartTimeUs(0ll),
-      mStopParams(NULL),
+      mSegmentStartTimeUs(-1ll),
+      mDiscontinuitySeq(-1ll),
+      mStartTimeUsRelative(false),
       mLastPlaylistFetchTimeUs(-1ll),
       mSeqNumber(-1),
       mNumRetries(0),
       mStartup(true),
+      mAdaptive(false),
       mPrepared(false),
-      mSkipToFirstIDRAfterConnect(false),
       mNextPTSTimeUs(-1ll),
       mMonitorQueueGeneration(0),
       mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY),
       mFirstPTSValid(false),
-      mAbsoluteTimeAnchorUs(0ll) {
+      mAbsoluteTimeAnchorUs(0ll),
+      mVideoBuffer(new AnotherPacketSource(NULL)) {
     memset(mPlaylistHash, 0, sizeof(mPlaylistHash));
     mStartTimeUsNotify->setInt32("what", kWhatStartedAt);
     mStartTimeUsNotify->setInt32("streamMask", 0);
@@ -335,8 +337,9 @@
         const sp<AnotherPacketSource> &videoSource,
         const sp<AnotherPacketSource> &subtitleSource,
         int64_t startTimeUs,
-        int64_t minStartTimeUs,
-        int32_t startSeqNumberHint) {
+        int64_t segmentStartTimeUs,
+        int32_t startDiscontinuitySeq,
+        bool adaptive) {
     sp<AMessage> msg = new AMessage(kWhatStart, id());
 
     uint32_t streamTypeMask = 0ul;
@@ -358,8 +361,9 @@
 
     msg->setInt32("streamTypeMask", streamTypeMask);
     msg->setInt64("startTimeUs", startTimeUs);
-    msg->setInt64("minStartTimeUs", minStartTimeUs);
-    msg->setInt32("startSeqNumberHint", startSeqNumberHint);
+    msg->setInt64("segmentStartTimeUs", segmentStartTimeUs);
+    msg->setInt32("startDiscontinuitySeq", startDiscontinuitySeq);
+    msg->setInt32("adaptive", adaptive);
     msg->post();
 }
 
@@ -367,9 +371,9 @@
     (new AMessage(kWhatPause, id()))->post();
 }
 
-void PlaylistFetcher::stopAsync(bool selfTriggered) {
+void PlaylistFetcher::stopAsync(bool clear) {
     sp<AMessage> msg = new AMessage(kWhatStop, id());
-    msg->setInt32("selfTriggered", selfTriggered);
+    msg->setInt32("clear", clear);
     msg->post();
 }
 
@@ -449,10 +453,13 @@
     CHECK(msg->findInt32("streamTypeMask", (int32_t *)&streamTypeMask));
 
     int64_t startTimeUs;
-    int32_t startSeqNumberHint;
+    int64_t segmentStartTimeUs;
+    int32_t startDiscontinuitySeq;
+    int32_t adaptive;
     CHECK(msg->findInt64("startTimeUs", &startTimeUs));
-    CHECK(msg->findInt64("minStartTimeUs", (int64_t *) &mMinStartTimeUs));
-    CHECK(msg->findInt32("startSeqNumberHint", &startSeqNumberHint));
+    CHECK(msg->findInt64("segmentStartTimeUs", &segmentStartTimeUs));
+    CHECK(msg->findInt32("startDiscontinuitySeq", &startDiscontinuitySeq));
+    CHECK(msg->findInt32("adaptive", &adaptive));
 
     if (streamTypeMask & LiveSession::STREAMTYPE_AUDIO) {
         void *ptr;
@@ -482,16 +489,16 @@
     }
 
     mStreamTypeMask = streamTypeMask;
+
     mStartTimeUs = startTimeUs;
+    mSegmentStartTimeUs = segmentStartTimeUs;
+    mDiscontinuitySeq = startDiscontinuitySeq;
 
     if (mStartTimeUs >= 0ll) {
         mSeqNumber = -1;
         mStartup = true;
         mPrepared = false;
-    }
-
-    if (startSeqNumberHint >= 0) {
-        mSeqNumber = startSeqNumberHint;
+        mAdaptive = adaptive;
     }
 
     postMonitorQueue();
@@ -506,11 +513,9 @@
 void PlaylistFetcher::onStop(const sp<AMessage> &msg) {
     cancelMonitorQueue();
 
-    int32_t selfTriggered;
-    CHECK(msg->findInt32("selfTriggered", &selfTriggered));
-    if (!selfTriggered) {
-        // Self triggered stops only happen during switching, in which case we do not want
-        // to clear the discontinuities queued at the end of packet sources.
+    int32_t clear;
+    CHECK(msg->findInt32("clear", &clear));
+    if (clear) {
         for (size_t i = 0; i < mPacketSources.size(); i++) {
             sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
             packetSource->clear();
@@ -552,15 +557,16 @@
         }
 
         // Don't resume if we would stop within a resume threshold.
+        int32_t discontinuitySeq;
         int64_t latestTimeUs = 0, stopTimeUs = 0;
-        sp<AMessage> latestMeta = packetSource->getLatestMeta();
+        sp<AMessage> latestMeta = packetSource->getLatestDequeuedMeta();
         if (latestMeta != NULL
-                && (latestMeta->findInt64("timeUs", &latestTimeUs)
-                && params->findInt64(stopKey, &stopTimeUs))) {
-            int64_t diffUs = stopTimeUs - latestTimeUs;
-            if (diffUs < resumeThreshold(latestMeta)) {
-                stop = true;
-            }
+                && latestMeta->findInt32("discontinuitySeq", &discontinuitySeq)
+                && discontinuitySeq == mDiscontinuitySeq
+                && latestMeta->findInt64("timeUs", &latestTimeUs)
+                && params->findInt64(stopKey, &stopTimeUs)
+                && stopTimeUs - latestTimeUs < resumeThreshold(latestMeta)) {
+            stop = true;
         }
     }
 
@@ -568,7 +574,7 @@
         for (size_t i = 0; i < mPacketSources.size(); i++) {
             mPacketSources.valueAt(i)->queueAccessUnit(mSession->createFormatChangeBuffer());
         }
-        stopAsync(/* selfTriggered = */ true);
+        stopAsync(/* clear = */ false);
         return OK;
     }
 
@@ -737,26 +743,47 @@
         mSeqNumber = lastSeqNumberInPlaylist;
     }
 
+    if (mDiscontinuitySeq < 0) {
+        mDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
+    }
+
     if (mSeqNumber < 0) {
         CHECK_GE(mStartTimeUs, 0ll);
 
-        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
-            mSeqNumber = getSeqNumberForTime(mStartTimeUs);
+        if (mSegmentStartTimeUs < 0) {
+            if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
+                // If this is a live session, start 3 segments from the end on connect
+                mSeqNumber = lastSeqNumberInPlaylist - 3;
+            } else {
+                mSeqNumber = getSeqNumberForTime(mStartTimeUs);
+                mStartTimeUs -= getSegmentStartTimeUs(mSeqNumber);
+            }
+            mStartTimeUsRelative = true;
             ALOGV("Initial sequence number for time %" PRId64 " is %d from (%d .. %d)",
                     mStartTimeUs, mSeqNumber, firstSeqNumberInPlaylist,
                     lastSeqNumberInPlaylist);
         } else {
-            // If this is a live session, start 3 segments from the end.
-            mSeqNumber = lastSeqNumberInPlaylist - 3;
+            mSeqNumber = getSeqNumberForTime(mSegmentStartTimeUs);
+            if (mAdaptive) {
+                // avoid double fetch/decode
+                mSeqNumber += 1;
+            }
+            ssize_t minSeq = getSeqNumberForDiscontinuity(mDiscontinuitySeq);
+            if (mSeqNumber < minSeq) {
+                mSeqNumber = minSeq;
+            }
+
             if (mSeqNumber < firstSeqNumberInPlaylist) {
                 mSeqNumber = firstSeqNumberInPlaylist;
             }
+
+            if (mSeqNumber > lastSeqNumberInPlaylist) {
+                mSeqNumber = lastSeqNumberInPlaylist;
+            }
             ALOGV("Initial sequence number for live event %d from (%d .. %d)",
                     mSeqNumber, firstSeqNumberInPlaylist,
                     lastSeqNumberInPlaylist);
         }
-
-        mStartTimeUs = -1ll;
     }
 
     if (mSeqNumber < firstSeqNumberInPlaylist
@@ -819,6 +846,7 @@
 
     int32_t val;
     if (itemMeta->findInt32("discontinuity", &val) && val != 0) {
+        mDiscontinuitySeq++;
         discontinuity = true;
     }
 
@@ -850,6 +878,7 @@
     }
 
     // block-wise download
+    bool startup = mStartup;
     ssize_t bytesRead;
     do {
         bytesRead = mSession->fetchFile(
@@ -879,7 +908,7 @@
             return;
         }
 
-        if (mStartup || discontinuity) {
+        if (startup || discontinuity) {
             // Signal discontinuity.
 
             if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
@@ -898,6 +927,8 @@
 
                 discontinuity = false;
             }
+
+            startup = false;
         }
 
         err = OK;
@@ -917,24 +948,19 @@
         }
 
         if (err == -EAGAIN) {
-            // bad starting sequence number hint
+            // starting sequence number too low
             mTSParser.clear();
             postMonitorQueue();
             return;
-        }
-
-        if (err == ERROR_OUT_OF_RANGE) {
+        } else if (err == ERROR_OUT_OF_RANGE) {
             // reached stopping point
-            stopAsync(/* selfTriggered = */ true);
+            stopAsync(/* clear = */ false);
             return;
-        }
-
-        if (err != OK) {
+        } else if (err != OK) {
             notifyError(err);
             return;
         }
 
-        mStartup = false;
     } while (bytesRead != 0);
 
     if (bufferStartsWithTsSyncByte(buffer)) {
@@ -994,11 +1020,44 @@
         return;
     }
 
+    mStartup = false;
     ++mSeqNumber;
 
     postMonitorQueue();
 }
 
+int32_t PlaylistFetcher::getSeqNumberForDiscontinuity(size_t discontinuitySeq) const {
+    int32_t firstSeqNumberInPlaylist;
+    if (mPlaylist->meta() == NULL
+            || !mPlaylist->meta()->findInt32("media-sequence", &firstSeqNumberInPlaylist)) {
+        firstSeqNumberInPlaylist = 0;
+    }
+
+    size_t curDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
+    if (discontinuitySeq < curDiscontinuitySeq) {
+        return firstSeqNumberInPlaylist <= 0 ? 0 : (firstSeqNumberInPlaylist - 1);
+    }
+
+    size_t index = 0;
+    while (index < mPlaylist->size()) {
+        sp<AMessage> itemMeta;
+        CHECK(mPlaylist->itemAt(index, NULL /* uri */, &itemMeta));
+
+        int32_t discontinuity;
+        if (itemMeta->findInt32("discontinuity", &discontinuity)) {
+            curDiscontinuitySeq++;
+        }
+
+        if (curDiscontinuitySeq == discontinuitySeq) {
+            return firstSeqNumberInPlaylist + index;
+        }
+
+        ++index;
+    }
+
+    return firstSeqNumberInPlaylist + mPlaylist->size();
+}
+
 int32_t PlaylistFetcher::getSeqNumberForTime(int64_t timeUs) const {
     int32_t firstSeqNumberInPlaylist;
     if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
@@ -1031,6 +1090,23 @@
     return firstSeqNumberInPlaylist + index;
 }
 
+const sp<ABuffer> &PlaylistFetcher::setAccessUnitProperties(
+        const sp<ABuffer> &accessUnit, const sp<AnotherPacketSource> &source, bool discard) {
+    sp<MetaData> format = source->getFormat();
+    if (format != NULL) {
+        // for simplicity, store a reference to the format in each unit
+        accessUnit->meta()->setObject("format", format);
+    }
+
+    if (discard) {
+        accessUnit->meta()->setInt32("discard", discard);
+    }
+
+    accessUnit->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
+    accessUnit->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
+    return accessUnit;
+}
+
 status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer) {
     if (mTSParser == NULL) {
         // Use TS_TIMESTAMPS_ARE_ABSOLUTE so pts carry over between fetchers.
@@ -1046,7 +1122,9 @@
         mTSParser->signalDiscontinuity(
                 ATSParser::DISCONTINUITY_SEEK, extra);
 
+        mAbsoluteTimeAnchorUs = mNextPTSTimeUs;
         mNextPTSTimeUs = -1ll;
+        mFirstPTSValid = false;
     }
 
     size_t offset = 0;
@@ -1099,46 +1177,30 @@
             continue;
         }
 
-        if (stream == LiveSession::STREAMTYPE_VIDEO && mVideoMime.empty()) {
-            const char *mime;
-            if (source->getFormat()->findCString(kKeyMIMEType, &mime)) {
-                mVideoMime.setTo(mime);
-                if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
-                    mSkipToFirstIDRAfterConnect = true;
-                }
-            }
-        }
-
         int64_t timeUs;
         sp<ABuffer> accessUnit;
         status_t finalResult;
         while (source->hasBufferAvailable(&finalResult)
                 && source->dequeueAccessUnit(&accessUnit) == OK) {
 
-            if (stream == LiveSession::STREAMTYPE_VIDEO && mSkipToFirstIDRAfterConnect) {
-                if (!IsIDR(accessUnit)) {
-                    continue;
-                } else {
-                    mSkipToFirstIDRAfterConnect = false;
-                }
-            }
-
             CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-            if (mMinStartTimeUs > 0) {
-                if (timeUs < mMinStartTimeUs) {
-                    // TODO untested path
-                    // try a later ts
-                    int32_t targetDuration;
-                    mPlaylist->meta()->findInt32("target-duration", &targetDuration);
-                    int32_t incr = (mMinStartTimeUs - timeUs) / 1000000 / targetDuration;
-                    if (incr == 0) {
-                        // increment mSeqNumber by at least one
-                        incr = 1;
+
+            if (mStartup) {
+                if (!mFirstPTSValid) {
+                    mFirstTimeUs = timeUs;
+                    mFirstPTSValid = true;
+                }
+                if (mStartTimeUsRelative) {
+                    timeUs -= mFirstTimeUs;
+                    if (timeUs < 0) {
+                        timeUs = 0;
                     }
-                    mSeqNumber += incr;
-                    err = -EAGAIN;
-                    break;
-                } else {
+                } else if (mAdaptive && timeUs > mStartTimeUs) {
+                    int32_t seq;
+                    if (mStartTimeUsNotify != NULL
+                            && !mStartTimeUsNotify->findInt32("discontinuitySeq", &seq)) {
+                        mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq);
+                    }
                     int64_t startTimeUs;
                     if (mStartTimeUsNotify != NULL
                             && !mStartTimeUsNotify->findInt64(key, &startTimeUs)) {
@@ -1155,12 +1217,51 @@
                         }
                     }
                 }
+
+                if (timeUs < mStartTimeUs) {
+                    if (mAdaptive) {
+                        int32_t targetDuration;
+                        mPlaylist->meta()->findInt32("target-duration", &targetDuration);
+                        int32_t incr = (mStartTimeUs - timeUs) / 1000000 / targetDuration;
+                        if (incr == 0) {
+                            // increment mSeqNumber by at least one
+                            incr = 1;
+                        }
+                        mSeqNumber += incr;
+                        err = -EAGAIN;
+                        break;
+                    } else {
+                        // buffer up to the closest preceding IDR frame
+                        ALOGV("timeUs %" PRId64 " us < mStartTimeUs %" PRId64 " us",
+                                timeUs, mStartTimeUs);
+                        const char *mime;
+                        sp<MetaData> format = source->getFormat();
+                        bool isAvc = false;
+                        if (format != NULL && format->findCString(kKeyMIMEType, &mime)
+                                && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+                            isAvc = true;
+                        }
+                        if (isAvc && IsIDR(accessUnit)) {
+                            mVideoBuffer->clear();
+                        }
+                        if (isAvc) {
+                            mVideoBuffer->queueAccessUnit(accessUnit);
+                        }
+
+                        continue;
+                    }
+                }
             }
 
             if (mStopParams != NULL) {
                 // Queue discontinuity in original stream.
+                int32_t discontinuitySeq;
                 int64_t stopTimeUs;
-                if (!mStopParams->findInt64(key, &stopTimeUs) || timeUs >= stopTimeUs) {
+                if (!mStopParams->findInt32("discontinuitySeq", &discontinuitySeq)
+                        || discontinuitySeq > mDiscontinuitySeq
+                        || !mStopParams->findInt64(key, &stopTimeUs)
+                        || (discontinuitySeq == mDiscontinuitySeq
+                                && timeUs >= stopTimeUs)) {
                     packetSource->queueAccessUnit(mSession->createFormatChangeBuffer());
                     mStreamTypeMask &= ~stream;
                     mPacketSources.removeItemsAt(i);
@@ -1169,15 +1270,18 @@
             }
 
             // Note that we do NOT dequeue any discontinuities except for format change.
-
-            // for simplicity, store a reference to the format in each unit
-            sp<MetaData> format = source->getFormat();
-            if (format != NULL) {
-                accessUnit->meta()->setObject("format", format);
+            if (stream == LiveSession::STREAMTYPE_VIDEO) {
+                const bool discard = true;
+                status_t status;
+                while (mVideoBuffer->hasBufferAvailable(&status)) {
+                    sp<ABuffer> videoBuffer;
+                    mVideoBuffer->dequeueAccessUnit(&videoBuffer);
+                    setAccessUnitProperties(videoBuffer, source, discard);
+                    packetSource->queueAccessUnit(videoBuffer);
+                }
             }
 
-            // Stash the sequence number so we can hint future playlist where to start at.
-            accessUnit->meta()->setInt32("seq", mSeqNumber);
+            setAccessUnitProperties(accessUnit, source);
             packetSource->queueAccessUnit(accessUnit);
         }
 
@@ -1244,7 +1348,8 @@
         CHECK(itemMeta->findInt64("durationUs", &durationUs));
         buffer->meta()->setInt64("timeUs", getSegmentStartTimeUs(mSeqNumber));
         buffer->meta()->setInt64("durationUs", durationUs);
-        buffer->meta()->setInt32("seq", mSeqNumber);
+        buffer->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
+        buffer->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
 
         packetSource->queueAccessUnit(buffer);
         return OK;
@@ -1310,14 +1415,6 @@
         firstID3Tag = false;
     }
 
-    if (!mFirstPTSValid) {
-        mFirstPTSValid = true;
-        mFirstPTS = PTS;
-    }
-    PTS -= mFirstPTS;
-
-    int64_t timeUs = (PTS * 100ll) / 9ll + mAbsoluteTimeAnchorUs;
-
     if (mStreamTypeMask != LiveSession::STREAMTYPE_AUDIO) {
         ALOGW("This stream only contains audio data!");
 
@@ -1360,6 +1457,12 @@
     int32_t sampleRate;
     CHECK(packetSource->getFormat()->findInt32(kKeySampleRate, &sampleRate));
 
+    int64_t timeUs = (PTS * 100ll) / 9ll;
+    if (!mFirstPTSValid) {
+        mFirstPTSValid = true;
+        mFirstTimeUs = timeUs;
+    }
+
     size_t offset = 0;
     while (offset < buffer->size()) {
         const uint8_t *adtsHeader = buffer->data() + offset;
@@ -1384,19 +1487,32 @@
 
         CHECK_LE(offset + aac_frame_length, buffer->size());
 
-        sp<ABuffer> unit = new ABuffer(aac_frame_length);
-        memcpy(unit->data(), adtsHeader, aac_frame_length);
-
         int64_t unitTimeUs = timeUs + numSamples * 1000000ll / sampleRate;
-        unit->meta()->setInt64("timeUs", unitTimeUs);
+        offset += aac_frame_length;
 
         // Each AAC frame encodes 1024 samples.
         numSamples += 1024;
 
-        unit->meta()->setInt32("seq", mSeqNumber);
-        packetSource->queueAccessUnit(unit);
+        if (mStartup) {
+            int64_t startTimeUs = unitTimeUs;
+            if (mStartTimeUsRelative) {
+                startTimeUs -= mFirstTimeUs;
+                if (startTimeUs < 0) {
+                    startTimeUs = 0;
+                }
+            }
+            if (startTimeUs < mStartTimeUs) {
+                continue;
+            }
+        }
 
-        offset += aac_frame_length;
+        sp<ABuffer> unit = new ABuffer(aac_frame_length);
+        memcpy(unit->data(), adtsHeader, aac_frame_length);
+
+        unit->meta()->setInt64("timeUs", unitTimeUs);
+        unit->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
+        unit->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
+        packetSource->queueAccessUnit(unit);
     }
 
     return OK;
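During startup the fetcher above no longer throws away everything before the requested start time: AVC access units that are still too early are staged in mVideoBuffer, the staging buffer is reset at every IDR, and once an on-time unit arrives the staged run (the last IDR and what follows it) is flushed ahead of it with the "discard" flag set. A simplified sketch of that staging policy; Unit and StartupVideoStager are illustrative types standing in for ABuffer and AnotherPacketSource.

#include <cstdint>
#include <deque>

struct Unit { int64_t timeUs; bool isIdr; };

// Illustrative model of the mVideoBuffer startup staging (not part of the patch).
struct StartupVideoStager {
    std::deque<Unit> staged;

    // Returns the units to queue for this access unit; empty while still staging.
    std::deque<Unit> onUnit(const Unit &u, int64_t startTimeUs) {
        if (u.timeUs < startTimeUs) {
            if (u.isIdr) {
                staged.clear();        // keep only the most recent IDR-led run
            }
            staged.push_back(u);
            return {};                 // too early, nothing queued yet
        }
        std::deque<Unit> out;
        out.swap(staged);              // flush the staged run first (tagged "discard")
        out.push_back(u);              // then the first on-time unit
        return out;
    }
};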
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index e4fdbff..daefb26 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -57,13 +57,15 @@
             const sp<AnotherPacketSource> &audioSource,
             const sp<AnotherPacketSource> &videoSource,
             const sp<AnotherPacketSource> &subtitleSource,
-            int64_t startTimeUs = -1ll,
-            int64_t minStartTimeUs = 0ll /* start after this timestamp */,
-            int32_t startSeqNumberHint = -1 /* try starting at this sequence number */);
+            int64_t startTimeUs = -1ll,        // starting timestamp
+            int64_t segmentStartTimeUs = -1ll, // starting position within the playlist;
+            // startTimeUs != segmentStartTimeUs only when the playlist is live
+            int32_t startDiscontinuitySeq = 0,
+            bool adaptive = false);
 
     void pauseAsync();
 
-    void stopAsync(bool selfTriggered = false);
+    void stopAsync(bool clear = true);
 
     void resumeUntilAsync(const sp<AMessage> &params);
 
@@ -99,11 +101,12 @@
 
     sp<LiveSession> mSession;
     AString mURI;
-    AString mVideoMime;
 
     uint32_t mStreamTypeMask;
     int64_t mStartTimeUs;
-    int64_t mMinStartTimeUs; // start fetching no earlier than this value
+    int64_t mSegmentStartTimeUs;
+    ssize_t mDiscontinuitySeq;
+    bool mStartTimeUsRelative;
     sp<AMessage> mStopParams; // message containing the latest timestamps we should fetch.
 
     KeyedVector<LiveSession::StreamType, sp<AnotherPacketSource> >
@@ -116,8 +119,8 @@
     int32_t mSeqNumber;
     int32_t mNumRetries;
     bool mStartup;
+    bool mAdaptive;
     bool mPrepared;
-    bool mSkipToFirstIDRAfterConnect;
     int64_t mNextPTSTimeUs;
 
     int32_t mMonitorQueueGeneration;
@@ -136,7 +139,9 @@
 
     bool mFirstPTSValid;
     uint64_t mFirstPTS;
+    int64_t mFirstTimeUs;
     int64_t mAbsoluteTimeAnchorUs;
+    sp<AnotherPacketSource> mVideoBuffer;
 
     // Stores the initialization vector to decrypt the next block of cipher text, which can
     // either be derived from the sequence number, read from the manifest, or copied from
@@ -175,6 +180,10 @@
     // Resume a fetcher to continue until the stopping point stored in msg.
     status_t onResumeUntil(const sp<AMessage> &msg);
 
+    const sp<ABuffer> &setAccessUnitProperties(
+            const sp<ABuffer> &accessUnit,
+            const sp<AnotherPacketSource> &source,
+            bool discard = false);
     status_t extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer);
 
     status_t extractAndQueueAccessUnits(
@@ -185,6 +194,8 @@
     void queueDiscontinuity(
             ATSParser::DiscontinuityType type, const sp<AMessage> &extra);
 
+    int32_t getSeqNumberWithAnchorTime(int64_t anchorTimeUs) const;
+    int32_t getSeqNumberForDiscontinuity(size_t discontinuitySeq) const;
     int32_t getSeqNumberForTime(int64_t timeUs) const;
 
     void updateDuration();
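getSeqNumberForDiscontinuity(), declared above, maps a target discontinuity sequence to a media sequence number by walking the playlist items and counting discontinuity tags from the playlist's #EXT-X-DISCONTINUITY-SEQUENCE base, clamping to just before the playlist when the target is older and to just past it when the target has not been reached. A sketch of that mapping over plain containers; the parameter names are illustrative.

#include <cstdint>
#include <vector>

// Illustrative mapping from discontinuity sequence to media sequence number.
int32_t seqNumberForDiscontinuity(
        const std::vector<bool> &itemHasDiscontinuity,  // one flag per playlist item
        int32_t firstSeqNumberInPlaylist,
        size_t playlistDiscontinuitySeq,                // #EXT-X-DISCONTINUITY-SEQUENCE value
        size_t targetDiscontinuitySeq) {
    if (targetDiscontinuitySeq < playlistDiscontinuitySeq) {
        // Target lies before this playlist; clamp to just before the first segment.
        return firstSeqNumberInPlaylist <= 0 ? 0 : firstSeqNumberInPlaylist - 1;
    }
    size_t cur = playlistDiscontinuitySeq;
    for (size_t index = 0; index < itemHasDiscontinuity.size(); ++index) {
        if (itemHasDiscontinuity[index]) {
            ++cur;                                      // this item starts a new group
        }
        if (cur == targetDiscontinuitySeq) {
            return firstSeqNumberInPlaylist + static_cast<int32_t>(index);
        }
    }
    // Target group not present yet; start past the end of the playlist.
    return firstSeqNumberInPlaylist + static_cast<int32_t>(itemHasDiscontinuity.size());
}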
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
index d050fa6..7f200dd 100644
--- a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
@@ -27,8 +27,6 @@
 #include <utils/threads.h>
 #include <utils/Vector.h>
 
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))
-
 namespace android {
 
 struct SoftVideoDecoderOMXComponent : public SimpleSoftOMXComponent {
diff --git a/media/libstagefright/include/WVMExtractor.h b/media/libstagefright/include/WVMExtractor.h
index 8e62946..ab7e8b8 100644
--- a/media/libstagefright/include/WVMExtractor.h
+++ b/media/libstagefright/include/WVMExtractor.h
@@ -49,6 +49,7 @@
     virtual sp<MediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
     virtual sp<MetaData> getMetaData();
+    virtual void setUID(uid_t uid);
 
     // Return the amount of data cached from the current
     // playback positiion (in us).
@@ -74,8 +75,6 @@
     // codec.
     void setCryptoPluginMode(bool cryptoPluginMode);
 
-    void setUID(uid_t uid);
-
     static bool getVendorLibHandle();
 
     status_t getError();
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index eda6387..6d8866a 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -894,6 +894,12 @@
                 ALOGV("Stream PID 0x%08x of type 0x%02x now has data.",
                      mElementaryPID, mStreamType);
 
+                const char *mime;
+                if (meta->findCString(kKeyMIMEType, &mime)
+                        && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
+                        && !IsIDR(accessUnit)) {
+                    continue;
+                }
                 mSource = new AnotherPacketSource(meta);
                 mSource->queueAccessUnit(accessUnit);
             }
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 72c9dae..010063f 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -14,6 +14,9 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AnotherPacketSource"
+
 #include "AnotherPacketSource.h"
 
 #include <media/stagefright/foundation/ABuffer.h>
@@ -38,7 +41,8 @@
       mFormat(NULL),
       mLastQueuedTimeUs(0),
       mEOSResult(OK),
-      mLatestEnqueuedMeta(NULL) {
+      mLatestEnqueuedMeta(NULL),
+      mLatestDequeuedMeta(NULL) {
     setFormat(meta);
 }
 
@@ -92,7 +96,7 @@
 
         sp<RefBase> object;
         if (buffer->meta()->findObject("format", &object)) {
-            return static_cast<MetaData*>(object.get());
+            return mFormat = static_cast<MetaData*>(object.get());
         }
 
         ++it;
@@ -121,6 +125,8 @@
             return INFO_DISCONTINUITY;
         }
 
+        mLatestDequeuedMeta = (*buffer)->meta()->dup();
+
         sp<RefBase> object;
         if ((*buffer)->meta()->findObject("format", &object)) {
             mFormat = static_cast<MetaData*>(object.get());
@@ -142,8 +148,10 @@
     }
 
     if (!mBuffers.empty()) {
+
         const sp<ABuffer> buffer = *mBuffers.begin();
         mBuffers.erase(mBuffers.begin());
+        mLatestDequeuedMeta = buffer->meta()->dup();
 
         int32_t discontinuity;
         if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
@@ -202,7 +210,7 @@
     mBuffers.push_back(buffer);
     mCondition.signal();
 
-    if (!mLatestEnqueuedMeta.get()) {
+    if (mLatestEnqueuedMeta == NULL) {
         mLatestEnqueuedMeta = buffer->meta();
     } else {
         int64_t latestTimeUs = 0;
@@ -341,9 +349,14 @@
     return (mEOSResult != OK);
 }
 
-sp<AMessage> AnotherPacketSource::getLatestMeta() {
+sp<AMessage> AnotherPacketSource::getLatestEnqueuedMeta() {
     Mutex::Autolock autoLock(mLock);
     return mLatestEnqueuedMeta;
 }
 
+sp<AMessage> AnotherPacketSource::getLatestDequeuedMeta() {
+    Mutex::Autolock autoLock(mLock);
+    return mLatestDequeuedMeta;
+}
+
 }  // namespace android
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index f38f9dc..0c717d7 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -64,7 +64,8 @@
 
     bool isFinished(int64_t duration) const;
 
-    sp<AMessage> getLatestMeta();
+    sp<AMessage> getLatestEnqueuedMeta();
+    sp<AMessage> getLatestDequeuedMeta();
 
 protected:
     virtual ~AnotherPacketSource();
@@ -80,6 +81,7 @@
     List<sp<ABuffer> > mBuffers;
     status_t mEOSResult;
     sp<AMessage> mLatestEnqueuedMeta;
+    sp<AMessage> mLatestDequeuedMeta;
 
     bool wasFormatChange(int32_t discontinuityType) const;
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index b8cc33a..1f77b2f 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1579,6 +1579,25 @@
     return NO_ERROR;
 }
 
+audio_hw_sync_t AudioFlinger::getAudioHwSyncForSession(audio_session_t sessionId)
+{
+    Mutex::Autolock _l(mLock);
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+        sp<PlaybackThread> thread = mPlaybackThreads.valueAt(i);
+        if ((thread->hasAudioSession(sessionId) & ThreadBase::TRACK_SESSION) != 0) {
+            // A session can only be on one thread, so exit after first match
+            String8 reply = thread->getParameters(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC));
+            AudioParameter param = AudioParameter(reply);
+            int value;
+            if (param.getInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), value) == NO_ERROR) {
+                return value;
+            }
+            break;
+        }
+    }
+    return AUDIO_HW_SYNC_INVALID;
+}
+
 // ----------------------------------------------------------------------------
 
 
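getAudioHwSyncForSession() above asks the playback thread that owns the session for its AUDIO_PARAMETER_STREAM_HW_AV_SYNC value and returns AUDIO_HW_SYNC_INVALID when no thread or no value is found. A hypothetical caller-side sketch, not part of this patch, assuming the usual Android media headers:

#include <media/AudioSystem.h>
#include <system/audio.h>

// Illustrative caller: resolve the HW A/V sync source for a session, falling back
// to the non-tunneled path when none is configured.
static audio_hw_sync_t hwSyncOrNone(audio_session_t sessionId) {
    audio_hw_sync_t sync = android::AudioSystem::getAudioHwSyncForSession(sessionId);
    if (sync == AUDIO_HW_SYNC_INVALID) {
        // No HW sync source for this session (or the lookup failed).
    }
    return sync;
}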
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 31c5a1a..4e9d49b 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -248,6 +248,9 @@
     /* Set audio port configuration */
     virtual status_t setAudioPortConfig(const struct audio_port_config *config);
 
+    /* Get the HW synchronization source used for an audio session */
+    virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId);
+
     virtual     status_t    onTransact(
                                 uint32_t code,
                                 const Parcel& data,
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 6edca1b..7ac2c0c 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -1797,109 +1797,6 @@
     }
 }
 
-#if 0
-// 2 tracks is also a common case
-// NEVER used in current implementation of process__validate()
-// only use if the 2 tracks have the same output buffer
-void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state,
-                                                            int64_t pts)
-{
-    int i;
-    uint32_t en = state->enabledTracks;
-
-    i = 31 - __builtin_clz(en);
-    const track_t& t0 = state->tracks[i];
-    AudioBufferProvider::Buffer& b0(t0.buffer);
-
-    en &= ~(1<<i);
-    i = 31 - __builtin_clz(en);
-    const track_t& t1 = state->tracks[i];
-    AudioBufferProvider::Buffer& b1(t1.buffer);
-
-    const int16_t *in0;
-    const int16_t vl0 = t0.volume[0];
-    const int16_t vr0 = t0.volume[1];
-    size_t frameCount0 = 0;
-
-    const int16_t *in1;
-    const int16_t vl1 = t1.volume[0];
-    const int16_t vr1 = t1.volume[1];
-    size_t frameCount1 = 0;
-
-    //FIXME: only works if two tracks use same buffer
-    int32_t* out = t0.mainBuffer;
-    size_t numFrames = state->frameCount;
-    const int16_t *buff = NULL;
-
-
-    while (numFrames) {
-
-        if (frameCount0 == 0) {
-            b0.frameCount = numFrames;
-            int64_t outputPTS = calculateOutputPTS(t0, pts,
-                                                   out - t0.mainBuffer);
-            t0.bufferProvider->getNextBuffer(&b0, outputPTS);
-            if (b0.i16 == NULL) {
-                if (buff == NULL) {
-                    buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
-                }
-                in0 = buff;
-                b0.frameCount = numFrames;
-            } else {
-                in0 = b0.i16;
-            }
-            frameCount0 = b0.frameCount;
-        }
-        if (frameCount1 == 0) {
-            b1.frameCount = numFrames;
-            int64_t outputPTS = calculateOutputPTS(t1, pts,
-                                                   out - t0.mainBuffer);
-            t1.bufferProvider->getNextBuffer(&b1, outputPTS);
-            if (b1.i16 == NULL) {
-                if (buff == NULL) {
-                    buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
-                }
-                in1 = buff;
-                b1.frameCount = numFrames;
-            } else {
-                in1 = b1.i16;
-            }
-            frameCount1 = b1.frameCount;
-        }
-
-        size_t outFrames = frameCount0 < frameCount1?frameCount0:frameCount1;
-
-        numFrames -= outFrames;
-        frameCount0 -= outFrames;
-        frameCount1 -= outFrames;
-
-        do {
-            int32_t l0 = *in0++;
-            int32_t r0 = *in0++;
-            l0 = mul(l0, vl0);
-            r0 = mul(r0, vr0);
-            int32_t l = *in1++;
-            int32_t r = *in1++;
-            l = mulAdd(l, vl1, l0) >> 12;
-            r = mulAdd(r, vr1, r0) >> 12;
-            // clamping...
-            l = clamp16(l);
-            r = clamp16(r);
-            *out++ = (r<<16) | (l & 0xFFFF);
-        } while (--outFrames);
-
-        if (frameCount0 == 0) {
-            t0.bufferProvider->releaseBuffer(&b0);
-        }
-        if (frameCount1 == 0) {
-            t1.bufferProvider->releaseBuffer(&b1);
-        }
-    }
-
-    delete [] buff;
-}
-#endif
-
 int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
                                        int outputFrameIndex)
 {
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 5ba377b..3b972bb 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -408,10 +408,6 @@
     static void process__genericResampling(state_t* state, int64_t pts);
     static void process__OneTrack16BitsStereoNoResampling(state_t* state,
                                                           int64_t pts);
-#if 0
-    static void process__TwoTracks16BitsStereoNoResampling(state_t* state,
-                                                           int64_t pts);
-#endif
 
     static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS,
                                       int outputFrameIndex);
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 77aca00..ec3d731 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1387,7 +1387,12 @@
 // Must be called with EffectChain::mLock locked
 void AudioFlinger::EffectChain::clearInputBuffer_l(sp<ThreadBase> thread)
 {
-    memset(mInBuffer, 0, thread->frameCount() * thread->frameSize());
+    // TODO: This will change in the future, depending on multichannel
+    // and sample format changes for effects.
+    // Currently effects processing is only available for stereo, AUDIO_FORMAT_PCM_16_BIT
+    // (4 bytes frame size)
+    const size_t frameSize = audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT) * FCC_2;
+    memset(mInBuffer, 0, thread->frameCount() * frameSize);
 }
 
 // Must be called with EffectChain::mLock locked
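The clearInputBuffer_l() change above stops deriving the size from the thread and hard-codes the effect input format instead. A worked example of the resulting size, assuming audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT) == 2 and FCC_2 == 2:

#include <cstddef>

constexpr size_t kBytesPerSample = 2;  // 16-bit PCM
constexpr size_t kChannelCount   = 2;  // FCC_2, fixed channel count of two
constexpr size_t kFrameSize = kBytesPerSample * kChannelCount;  // 4 bytes per frame
static_assert(kFrameSize == 4, "effect input frames are 4 bytes (stereo 16-bit PCM)");
// So the memset above clears thread->frameCount() * 4 bytes.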
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 2e2f533..2f65370 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2639,12 +2639,9 @@
 
     threadLoop_exit();
 
-    // for DuplicatingThread, standby mode is handled by the outputTracks, otherwise ...
-    if (mType == MIXER || mType == DIRECT || mType == OFFLOAD) {
-        // put output stream into standby mode
-        if (!mStandby) {
-            mOutput->stream->common.standby(&mOutput->stream->common);
-        }
+    if (!mStandby) {
+        threadLoop_standby();
+        mStandby = true;
     }
 
     releaseWakeLock();
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 48093da..c5ab832 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1885,10 +1885,10 @@
     buf.mFrameCount = buffer->frameCount;
     status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
     ALOGV_IF(status != NO_ERROR, "PatchTrack() %p getNextBuffer status %d", this, status);
+    buffer->frameCount = buf.mFrameCount;
     if (buf.mFrameCount == 0) {
         return WOULD_BLOCK;
     }
-    buffer->frameCount = buf.mFrameCount;
     status = Track::getNextBuffer(buffer, pts);
     return status;
 }
@@ -2166,10 +2166,10 @@
     status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
     ALOGV_IF(status != NO_ERROR,
              "PatchRecord() %p mPeerProxy->obtainBuffer status %d", this, status);
+    buffer->frameCount = buf.mFrameCount;
     if (buf.mFrameCount == 0) {
         return WOULD_BLOCK;
     }
-    buffer->frameCount = buf.mFrameCount;
     status = RecordTrack::getNextBuffer(buffer, pts);
     return status;
 }
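Both PatchTrack and PatchRecord above now copy the obtained frame count back into the caller's buffer before the WOULD_BLOCK early return, so a blocked call leaves the caller seeing zero frames rather than its stale requested count. A minimal sketch of that ordering; Buffer, Status and getNextBufferSketch are illustrative, not names from the patch.

#include <cstddef>

struct Buffer { size_t frameCount; };

enum Status { STATUS_OK, STATUS_WOULD_BLOCK };

// Always reflect what was actually obtained before deciding whether to block.
static Status getNextBufferSketch(Buffer *caller, size_t obtainedFrames) {
    caller->frameCount = obtainedFrames;
    if (obtainedFrames == 0) {
        return STATUS_WOULD_BLOCK;     // caller now sees 0 frames, not the old request
    }
    return STATUS_OK;
}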
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index f3be42d..6512c38 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -30,7 +30,7 @@
     libbinder \
     libmedia \
     libhardware \
-    libhardware_legacy \
+    libhardware_legacy
 
 ifneq ($(USE_LEGACY_AUDIO_POLICY), 1)
 LOCAL_SHARED_LIBRARIES += \
@@ -58,7 +58,8 @@
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
     libutils \
-    liblog
+    liblog \
+    libsoundtrigger
 
 LOCAL_STATIC_LIBRARIES := \
     libmedia_helper
diff --git a/services/audiopolicy/AudioPolicyClientImpl.cpp b/services/audiopolicy/AudioPolicyClientImpl.cpp
index c0019d1..3e090e9 100644
--- a/services/audiopolicy/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/AudioPolicyClientImpl.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "AudioPolicyClientImpl"
 //#define LOG_NDEBUG 0
 
+#include <soundtrigger/SoundTrigger.h>
 #include <utils/Log.h>
 #include "AudioPolicyService.h"
 
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 50ee803..5524463 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -190,6 +190,11 @@
     virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
     virtual void clearAudioPatches(uid_t uid) = 0;
 
+    virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+                                           audio_io_handle_t *ioHandle,
+                                           audio_devices_t *device) = 0;
+
+    virtual status_t releaseSoundTriggerSession(audio_session_t session) = 0;
 };
 
 
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
index 75745b3..2c51e25 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -531,4 +531,24 @@
     return mAudioPolicyManager->setAudioPortConfig(config);
 }
 
+status_t AudioPolicyService::acquireSoundTriggerSession(audio_session_t *session,
+                                       audio_io_handle_t *ioHandle,
+                                       audio_devices_t *device)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+
+    return mAudioPolicyManager->acquireSoundTriggerSession(session, ioHandle, device);
+}
+
+status_t AudioPolicyService::releaseSoundTriggerSession(audio_session_t session)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+
+    return mAudioPolicyManager->releaseSoundTriggerSession(session);
+}
+
 }; // namespace android
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
index aa46ace..f20c070 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
@@ -496,10 +496,21 @@
                                     audio_output_flags_t flags,
                                     const audio_offload_info_t *offloadInfo)
 {
-    //FIXME: temporary to fix build with USE_LEGACY_AUDIO_POLICY
-    audio_stream_type_t stream = AUDIO_STREAM_MUSIC;
+    audio_stream_type_t stream = audio_attributes_to_stream_type(attr);
+
     return getOutput(stream, samplingRate, format, channelMask, flags, offloadInfo);
 }
 
+status_t AudioPolicyService::acquireSoundTriggerSession(audio_session_t *session,
+                                       audio_io_handle_t *ioHandle,
+                                       audio_devices_t *device)
+{
+    return INVALID_OPERATION;
+}
+
+status_t AudioPolicyService::releaseSoundTriggerSession(audio_session_t session)
+{
+    return INVALID_OPERATION;
+}
 
 }; // namespace android
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 440f5d0..f95b839 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -43,6 +43,7 @@
 #include <hardware/audio.h>
 #include <hardware/audio_effect.h>
 #include <media/AudioParameter.h>
+#include <soundtrigger/SoundTrigger.h>
 #include "AudioPolicyManager.h"
 #include "audio_policy_conf.h"
 
@@ -87,14 +88,15 @@
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPDIF),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_FM),
     STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_AMBIENT),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_HDMI),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
     STRING_TO_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
@@ -116,6 +118,7 @@
     STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
     STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
     STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
 };
 
 const StringToEnum sFormatNameToEnumTable[] = {
@@ -238,10 +241,14 @@
             // register new device as available
             index = mAvailableOutputDevices.add(devDesc);
             if (index >= 0) {
-                mAvailableOutputDevices[index]->mId = nextUniqueId();
                 sp<HwModule> module = getModuleForDevice(device);
-                ALOG_ASSERT(module != NULL, "setDeviceConnectionState():"
-                        "could not find HW module for device %08x", device);
+                if (module == 0) {
+                    ALOGD("setDeviceConnectionState() could not find HW module for device %08x",
+                          device);
+                    mAvailableOutputDevices.remove(devDesc);
+                    return INVALID_OPERATION;
+                }
+                mAvailableOutputDevices[index]->mId = nextUniqueId();
                 mAvailableOutputDevices[index]->mModule = module;
             } else {
                 return NO_MEMORY;
@@ -297,17 +304,24 @@
         }
 
         updateDevicesAndOutputs();
+        if (mPhoneState == AUDIO_MODE_IN_CALL) {
+            audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
+            updateCallRouting(newDevice);
+        }
         for (size_t i = 0; i < mOutputs.size(); i++) {
-            // do not force device change on duplicated output because if device is 0, it will
-            // also force a device 0 for the two outputs it is duplicated to which may override
-            // a valid device selection on those outputs.
-            bool force = !mOutputs.valueAt(i)->isDuplicated()
-                    && (!deviceDistinguishesOnAddress(device)
-                            // always force when disconnecting (a non-duplicated device)
-                            || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
-            setOutputDevice(mOutputs.keyAt(i),
-                            getNewOutputDevice(mOutputs.keyAt(i), true /*fromCache*/),
-                            force, 0);
+            audio_io_handle_t output = mOutputs.keyAt(i);
+            if ((mPhoneState != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) {
+                audio_devices_t newDevice = getNewOutputDevice(mOutputs.keyAt(i),
+                                                               true /*fromCache*/);
+                // do not force device change on duplicated output because if device is 0, it will
+                // also force a device 0 for the two outputs it is duplicated to which may override
+                // a valid device selection on those outputs.
+                bool force = !mOutputs.valueAt(i)->isDuplicated()
+                        && (!deviceDistinguishesOnAddress(device)
+                                // always force when disconnecting (a non-duplicated device)
+                                || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
+                setOutputDevice(output, newDevice, force, 0);
+            }
         }
 
         mpClientInterface->onAudioPortListUpdate();
@@ -365,6 +379,11 @@
 
         closeAllInputs();
 
+        if (mPhoneState == AUDIO_MODE_IN_CALL) {
+            audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
+            updateCallRouting(newDevice);
+        }
+
         mpClientInterface->onAudioPortListUpdate();
         return NO_ERROR;
     } // end if is input device
@@ -399,10 +418,124 @@
     }
 }
 
+void AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, int delayMs)
+{
+    bool createTxPatch = false;
+    struct audio_patch patch;
+    patch.num_sources = 1;
+    patch.num_sinks = 1;
+    status_t status;
+    audio_patch_handle_t afPatchHandle;
+    DeviceVector deviceList;
+
+    audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+    ALOGV("updateCallRouting device rxDevice %08x txDevice %08x", rxDevice, txDevice);
+
+    // release existing RX patch if any
+    if (mCallRxPatch != 0) {
+        mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
+        mCallRxPatch.clear();
+    }
+    // release TX patch if any
+    if (mCallTxPatch != 0) {
+        mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
+        mCallTxPatch.clear();
+    }
+
+    // If the RX device is on the primary HW module, then use legacy routing method for voice calls
+    // via setOutputDevice() on primary output.
+    // Otherwise, create two audio patches for TX and RX path.
+    if (availablePrimaryOutputDevices() & rxDevice) {
+        setOutputDevice(mPrimaryOutput, rxDevice, true, delayMs);
+        // If the TX device is also on the primary HW module, setOutputDevice() will take care
+        // of it due to legacy implementation. If not, create a patch.
+        if ((availablePrimaryInputDevices() & txDevice & ~AUDIO_DEVICE_BIT_IN)
+                == AUDIO_DEVICE_NONE) {
+            createTxPatch = true;
+        }
+    } else {
+        // create RX path audio patch
+        deviceList = mAvailableOutputDevices.getDevicesFromType(rxDevice);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() selected device not in output device list");
+        sp<DeviceDescriptor> rxSinkDeviceDesc = deviceList.itemAt(0);
+        deviceList = mAvailableInputDevices.getDevicesFromType(AUDIO_DEVICE_IN_TELEPHONY_RX);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() no telephony RX device");
+        sp<DeviceDescriptor> rxSourceDeviceDesc = deviceList.itemAt(0);
+
+        rxSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
+        rxSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
+
+        // request to reuse existing output stream if one is already opened to reach the RX device
+        SortedVector<audio_io_handle_t> outputs =
+                                getOutputsForDevice(rxDevice, mOutputs);
+        audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+        if (output != AUDIO_IO_HANDLE_NONE) {
+            sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+            ALOG_ASSERT(!outputDesc->isDuplicated(),
+                        "updateCallRouting() RX device output is duplicated");
+            outputDesc->toAudioPortConfig(&patch.sources[1]);
+            patch.num_sources = 2;
+        }
+
+        afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, 0);
+        ALOGW_IF(status != NO_ERROR, "updateCallRouting() error %d creating RX audio patch",
+                                               status);
+        if (status == NO_ERROR) {
+            mCallRxPatch = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
+                                       &patch, mUidCached);
+            mCallRxPatch->mAfPatchHandle = afPatchHandle;
+            mCallRxPatch->mUid = mUidCached;
+        }
+        createTxPatch = true;
+    }
+    if (createTxPatch) {
+
+        struct audio_patch patch;
+        patch.num_sources = 1;
+        patch.num_sinks = 1;
+        deviceList = mAvailableInputDevices.getDevicesFromType(txDevice);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() selected device not in input device list");
+        sp<DeviceDescriptor> txSourceDeviceDesc = deviceList.itemAt(0);
+        txSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
+        deviceList = mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_TELEPHONY_TX);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() no telephony TX device");
+        sp<DeviceDescriptor> txSinkDeviceDesc = deviceList.itemAt(0);
+        txSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
+
+        SortedVector<audio_io_handle_t> outputs =
+                                getOutputsForDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX, mOutputs);
+        audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+        // request to reuse existing output stream if one is already opened to reach the TX
+        // path output device
+        if (output != AUDIO_IO_HANDLE_NONE) {
+            sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+            ALOG_ASSERT(!outputDesc->isDuplicated(),
+                        "updateCallRouting() RX device output is duplicated");
+            outputDesc->toAudioPortConfig(&patch.sources[1]);
+            patch.num_sources = 2;
+        }
+
+        afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, 0);
+        ALOGW_IF(status != NO_ERROR, "setPhoneState() error %d creating TX audio patch",
+                                               status);
+        if (status == NO_ERROR) {
+            mCallTxPatch = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
+                                       &patch, mUidCached);
+            mCallTxPatch->mAfPatchHandle = afPatchHandle;
+            mCallTxPatch->mUid = mUidCached;
+        }
+    }
+}
+
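For reference, the non-legacy branch above reduces to a device-to-device patch between the telephony RX port and the selected sink device. The sketch below is an illustrative recap only (not part of the change), reusing the local names from updateCallRouting() and assuming the simple case where no existing output stream is reused:

    // Illustrative sketch: minimal RX call patch when no existing output mix is added as a source.
    struct audio_patch rxPatch;
    rxPatch.num_sources = 1;
    rxPatch.num_sinks = 1;
    rxSourceDeviceDesc->toAudioPortConfig(&rxPatch.sources[0]); // AUDIO_DEVICE_IN_TELEPHONY_RX
    rxSinkDeviceDesc->toAudioPortConfig(&rxPatch.sinks[0]);     // selected RX device (speaker, headset, ...)
    audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
    status_t res = mpClientInterface->createAudioPatch(&rxPatch, &handle, 0 /*delayMs*/);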
 void AudioPolicyManager::setPhoneState(audio_mode_t state)
 {
     ALOGV("setPhoneState() state %d", state);
-    audio_devices_t newDevice = AUDIO_DEVICE_NONE;
     if (state < 0 || state >= AUDIO_MODE_CNT) {
         ALOGW("setPhoneState() invalid state %d", state);
         return;
@@ -454,19 +587,12 @@
     }
 
     // check for device and output changes triggered by new phone state
-    newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
     checkA2dpSuspend();
     checkOutputForAllStrategies();
     updateDevicesAndOutputs();
 
     sp<AudioOutputDescriptor> hwOutputDesc = mOutputs.valueFor(mPrimaryOutput);
 
-    // force routing command to audio hardware when ending call
-    // even if no device change is needed
-    if (isStateInCall(oldState) && newDevice == AUDIO_DEVICE_NONE) {
-        newDevice = hwOutputDesc->device();
-    }
-
     int delayMs = 0;
     if (isStateInCall(state)) {
         nsecs_t sysTime = systemTime();
@@ -493,9 +619,30 @@
         }
     }
 
-    // change routing is necessary
-    setOutputDevice(mPrimaryOutput, newDevice, force, delayMs);
+    // Note that even though getNewOutputDevice() is called on the primary output,
+    // the device it returns is not necessarily reachable via this output
+    audio_devices_t rxDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
+    // force routing command to audio hardware when ending call
+    // even if no device change is needed
+    if (isStateInCall(oldState) && rxDevice == AUDIO_DEVICE_NONE) {
+        rxDevice = hwOutputDesc->device();
+    }
 
+    if (state == AUDIO_MODE_IN_CALL) {
+        updateCallRouting(rxDevice, delayMs);
+    } else if (oldState == AUDIO_MODE_IN_CALL) {
+        if (mCallRxPatch != 0) {
+            mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
+            mCallRxPatch.clear();
+        }
+        if (mCallTxPatch != 0) {
+            mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
+            mCallTxPatch.clear();
+        }
+        setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
+    } else {
+        setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
+    }
     // if entering in call state, handle special case of active streams
     // pertaining to sonification strategy see handleIncallSonification()
     if (isStateInCall(state)) {
@@ -584,10 +731,16 @@
     checkA2dpSuspend();
     checkOutputForAllStrategies();
     updateDevicesAndOutputs();
+    if (mPhoneState == AUDIO_MODE_IN_CALL) {
+        audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/);
+        updateCallRouting(newDevice);
+    }
     for (size_t i = 0; i < mOutputs.size(); i++) {
         audio_io_handle_t output = mOutputs.keyAt(i);
         audio_devices_t newDevice = getNewOutputDevice(output, true /*fromCache*/);
-        setOutputDevice(output, newDevice, (newDevice != AUDIO_DEVICE_NONE));
+        if ((mPhoneState != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) {
+            setOutputDevice(output, newDevice, (newDevice != AUDIO_DEVICE_NONE));
+        }
         if (forceVolumeReeval && (newDevice != AUDIO_DEVICE_NONE)) {
             applyStreamVolumes(output, newDevice, 0, true);
         }
@@ -665,12 +818,17 @@
         ALOGE("getOutputForAttr() called with NULL audio attributes");
         return 0;
     }
-    ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s",
-            attr->usage, attr->content_type, attr->tags);
+    ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x",
+            attr->usage, attr->content_type, attr->tags, attr->flags);
 
     // TODO this is where filtering for custom policies (rerouting, dynamic sources) will go
     routing_strategy strategy = (routing_strategy) getStrategyForAttr(attr);
     audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+
+    if ((attr->flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
+        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
+    }
+
     ALOGV("getOutputForAttr() device %d, samplingRate %d, format %x, channelMask %x, flags %x",
           device, samplingRate, format, channelMask, flags);
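For context, this path is driven purely by the new AUDIO_FLAG_HW_AV_SYNC attribute flag. A hypothetical caller-side sketch (not part of the patch; constants from system/audio.h) of how a client would request a hardware A/V synchronized output:

    audio_attributes_t attributes;
    memset(&attributes, 0, sizeof(attributes));
    attributes.usage = AUDIO_USAGE_MEDIA;
    attributes.content_type = AUDIO_CONTENT_TYPE_MOVIE;
    attributes.flags = (audio_flags_mask_t)AUDIO_FLAG_HW_AV_SYNC;
    // getOutputForAttr() then adds AUDIO_OUTPUT_FLAG_HW_AV_SYNC (and, below, DIRECT).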
 
@@ -740,6 +898,9 @@
     if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
         flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
     }
+    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
+        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
+    }
 
     // Do not allow offloading if one non offloadable effect is enabled. This prevents from
     // creating an offloaded track and tearing it down immediately after start when audioflinger
@@ -1119,6 +1280,17 @@
     config.channel_mask = channelMask;
     config.format = format;
     audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+
+    bool isSoundTrigger = false;
+    if (inputSource == AUDIO_SOURCE_HOTWORD) {
+        ssize_t index = mSoundTriggerSessions.indexOfKey(session);
+        if (index >= 0) {
+            input = mSoundTriggerSessions.valueFor(session);
+            isSoundTrigger = true;
+            ALOGV("SoundTrigger capture on session %d input %d", session, input);
+        }
+    }
+
     status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
                                                    &input,
                                                    &config,
@@ -1149,6 +1321,7 @@
     inputDesc->mChannelMask = channelMask;
     inputDesc->mDevice = device;
     inputDesc->mSessions.add(session);
+    inputDesc->mIsSoundTrigger = isSoundTrigger;
 
     addInput(input, inputDesc);
     mpClientInterface->onAudioPortListUpdate();
@@ -1194,6 +1367,9 @@
     }
 
     if (inputDesc->mRefCount == 0) {
+        if (activeInputsCount() == 0) {
+            SoundTrigger::setCaptureState(true);
+        }
         setInputDevice(input, getNewInputDevice(input), true /* force */);
 
         // Automatically enable the remote submix output when input is started.
@@ -1242,6 +1418,10 @@
         }
 
         resetInputDevice(input);
+
+        if (activeInputsCount() == 0) {
+            SoundTrigger::setCaptureState(false);
+        }
     }
     return NO_ERROR;
 }
@@ -1867,6 +2047,25 @@
     return module;
 }
 
+audio_devices_t AudioPolicyManager::availablePrimaryOutputDevices()
+{
+    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(mPrimaryOutput);
+    audio_devices_t devices = outputDesc->mProfile->mSupportedDevices.types();
+    return devices & mAvailableOutputDevices.types();
+}
+
+audio_devices_t AudioPolicyManager::availablePrimaryInputDevices()
+{
+    audio_module_handle_t primaryHandle =
+                                mOutputs.valueFor(mPrimaryOutput)->mProfile->mModule->mHandle;
+    audio_devices_t devices = AUDIO_DEVICE_NONE;
+    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
+        if (mAvailableInputDevices[i]->mModule->mHandle == primaryHandle) {
+            devices |= mAvailableInputDevices[i]->mDeviceType;
+        }
+    }
+    return devices;
+}
 
 status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch,
                                                audio_patch_handle_t *handle,
@@ -2253,6 +2452,31 @@
     }
 }
 
+status_t AudioPolicyManager::acquireSoundTriggerSession(audio_session_t *session,
+                                       audio_io_handle_t *ioHandle,
+                                       audio_devices_t *device)
+{
+    *session = (audio_session_t)mpClientInterface->newAudioUniqueId();
+    *ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId();
+    *device = getDeviceForInputSource(AUDIO_SOURCE_HOTWORD);
+
+    mSoundTriggerSessions.add(*session, *ioHandle);
+
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::releaseSoundTriggerSession(audio_session_t session)
+{
+    ssize_t index = mSoundTriggerSessions.indexOfKey(session);
+    if (index < 0) {
+        ALOGW("acquireSoundTriggerSession() session %d not registered", session);
+        return BAD_VALUE;
+    }
+
+    mSoundTriggerSessions.removeItem(session);
+    return NO_ERROR;
+}
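These two entry points are paired by the caller; SoundTriggerHwService (further down in this change) reaches them through AudioSystem. A minimal usage sketch, assuming that path:

    audio_session_t session;
    audio_io_handle_t ioHandle;
    audio_devices_t device;
    if (AudioSystem::acquireSoundTriggerSession(&session, &ioHandle, &device) == NO_ERROR) {
        // ioHandle/device become the HAL's capture_handle/capture_device; the session is
        // reported back to clients so that a hotword capture can reuse the reserved input.
        AudioSystem::releaseSoundTriggerSession(session);
    }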
+
 status_t AudioPolicyManager::addAudioPatch(audio_patch_handle_t handle,
                                            const sp<AudioPatch>& patch)
 {
@@ -3591,6 +3815,21 @@
         // FALL THROUGH
 
     case STRATEGY_PHONE:
+        // Force use of only devices on the primary output if:
+        // - in call AND
+        //   - audio cannot be routed from the voice call RX device
+        //     (AUDIO_DEVICE_IN_TELEPHONY_RX is not available), OR
+        //   - the audio HAL version is < 3.0 and the TX device is on the primary HW module
+        if (mPhoneState == AUDIO_MODE_IN_CALL) {
+            audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+            sp<AudioOutputDescriptor> hwOutputDesc = mOutputs.valueFor(mPrimaryOutput);
+            if (((mAvailableInputDevices.types() &
+                    AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
+                    (((txDevice & availablePrimaryInputDevices() & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+                         (hwOutputDesc->mAudioPort->mModule->mHalVersion <
+                             AUDIO_DEVICE_API_VERSION_3_0))) {
+                availableOutputDeviceTypes = availablePrimaryOutputDevices();
+            }
+        }
         // for phone strategy, we first consider the forced use and then the available devices by order
         // of priority
         switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
@@ -3620,11 +3859,11 @@
             if (device) break;
             device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
             if (device) break;
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
+            if (device) break;
             if (mPhoneState != AUDIO_MODE_IN_CALL) {
                 device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
                 if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
-                if (device) break;
                 device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
                 if (device) break;
                 device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
@@ -3661,6 +3900,8 @@
                 device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
                 if (device) break;
             }
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_LINE;
+            if (device) break;
             device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
             if (device) break;
             device = mDefaultOutputDevice->mDeviceType;
@@ -3717,6 +3958,9 @@
         if (device2 == AUDIO_DEVICE_NONE) {
             device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
         }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_LINE;
+        }
         if (device2 == AUDIO_DEVICE_NONE) {
             device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
         }
@@ -4013,7 +4257,8 @@
             inputDesc->toAudioPortConfig(&patch.sinks[0]);
             // AUDIO_SOURCE_HOTWORD is for internal use only:
             // handled as AUDIO_SOURCE_VOICE_RECOGNITION by the audio HAL
-            if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD) {
+            if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD &&
+                    !inputDesc->mIsSoundTrigger) {
                 patch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_VOICE_RECOGNITION;
             }
             patch.num_sinks = 1;
@@ -4122,19 +4367,60 @@
           device = AUDIO_DEVICE_IN_VOICE_CALL;
           break;
       }
-      // FALL THROUGH
+      break;
 
     case AUDIO_SOURCE_DEFAULT:
     case AUDIO_SOURCE_MIC:
     if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
         device = AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
-        break;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+        device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+        device = AUDIO_DEVICE_IN_USB_DEVICE;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+        device = AUDIO_DEVICE_IN_BUILTIN_MIC;
     }
-    // FALL THROUGH
+    break;
+
+    case AUDIO_SOURCE_VOICE_COMMUNICATION:
+        // Restrict device choice to devices on the primary input if in call and the HAL does
+        // not support routing to the voice call TX path.
+        if ((mPhoneState == AUDIO_MODE_IN_CALL) &&
+                (mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
+            availableDeviceTypes = availablePrimaryInputDevices() & ~AUDIO_DEVICE_BIT_IN;
+        }
+
+        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
+        case AUDIO_POLICY_FORCE_BT_SCO:
+            // if SCO device is requested but no SCO device is available, fall back to default case
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
+                device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+                break;
+            }
+            // FALL THROUGH
+
+        default:    // FORCE_NONE
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+                device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+                device = AUDIO_DEVICE_IN_USB_DEVICE;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+            }
+            break;
+
+        case AUDIO_POLICY_FORCE_SPEAKER:
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
+                device = AUDIO_DEVICE_IN_BACK_MIC;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+            }
+            break;
+        }
+        break;
 
     case AUDIO_SOURCE_VOICE_RECOGNITION:
     case AUDIO_SOURCE_HOTWORD:
-    case AUDIO_SOURCE_VOICE_COMMUNICATION:
         if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
                 availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
             device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
@@ -4198,6 +4484,18 @@
     return 0;
 }
 
+uint32_t AudioPolicyManager::activeInputsCount() const
+{
+    uint32_t count = 0;
+    for (size_t i = 0; i < mInputs.size(); i++) {
+        const sp<AudioInputDescriptor>  desc = mInputs.valueAt(i);
+        if (desc->mRefCount > 0) {
+            count++;
+        }
+    }
+    return count;
+}
+
 
 audio_devices_t AudioPolicyManager::getDeviceForVolume(audio_devices_t device)
 {
@@ -4237,10 +4535,13 @@
         case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
         case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
             return DEVICE_CATEGORY_HEADSET;
+        case AUDIO_DEVICE_OUT_LINE:
+        case AUDIO_DEVICE_OUT_AUX_DIGITAL:
+        /*USB?  Remote submix?*/
+            return DEVICE_CATEGORY_EXT_MEDIA;
         case AUDIO_DEVICE_OUT_SPEAKER:
         case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT:
         case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER:
-        case AUDIO_DEVICE_OUT_AUX_DIGITAL:
         case AUDIO_DEVICE_OUT_USB_ACCESSORY:
         case AUDIO_DEVICE_OUT_USB_DEVICE:
         case AUDIO_DEVICE_OUT_REMOTE_SUBMIX:
@@ -4307,6 +4608,11 @@
 };
 
 const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sExtMediaSystemVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {1, -58.0f}, {20, -40.0f}, {60, -21.0f}, {100, -10.0f}
+};
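Each curve is a table of (normalized volume index, dB attenuation) points. As a worked illustration of what such a table means (assuming the piecewise-linear dB interpolation the policy manager applies between points; the struct and helper below are invented for the sketch):

    // Illustrative sketch: interpolate the dB attenuation for a volume index normalized to 1..100.
    struct CurvePoint { int index; float dbAttenuation; };   // mirrors VolumeCurvePoint
    static float curveDb(const CurvePoint curve[4], int normIndex) {
        if (normIndex <= curve[0].index) return curve[0].dbAttenuation;
        for (int seg = 0; seg < 3; seg++) {
            if (normIndex <= curve[seg + 1].index) {
                float frac = (float)(normIndex - curve[seg].index) /
                             (float)(curve[seg + 1].index - curve[seg].index);
                return curve[seg].dbAttenuation +
                       frac * (curve[seg + 1].dbAttenuation - curve[seg].dbAttenuation);
            }
        }
        return curve[3].dbAttenuation;
    }
    // e.g. for the EXT_MEDIA system curve above, index 40 lies halfway between the
    // (20, -40dB) and (60, -21dB) points and maps to -40dB + 0.5 * 19dB = -30.5dB.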
+
+const AudioPolicyManager::VolumeCurvePoint
     AudioPolicyManager::sSpeakerMediaVolumeCurve[AudioPolicyManager::VOLCNT] = {
     {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f}
 };
@@ -4362,52 +4668,62 @@
     { // AUDIO_STREAM_VOICE_CALL
         sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVoiceVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_SYSTEM
         sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultSystemVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultSystemVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_RING
         sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_MUSIC
         sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_ALARM
         sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_NOTIFICATION
         sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_BLUETOOTH_SCO
         sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVoiceVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_ENFORCED_AUDIBLE
         sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultSystemVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     {  // AUDIO_STREAM_DTMF
         sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultSystemVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_TTS
         sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
         sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
 };
 
@@ -4883,7 +5199,7 @@
 AudioPolicyManager::AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
     : mId(0), mIoHandle(0),
       mDevice(AUDIO_DEVICE_NONE), mPatchHandle(0), mRefCount(0),
-      mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile)
+      mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile), mIsSoundTrigger(false)
 {
     if (profile != NULL) {
         mAudioPort = profile;
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index e28a362..95aab65 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -172,6 +172,12 @@
         virtual status_t setAudioPortConfig(const struct audio_port_config *config);
         virtual void clearAudioPatches(uid_t uid);
 
+        virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+                                               audio_io_handle_t *ioHandle,
+                                               audio_devices_t *device);
+
+        virtual status_t releaseSoundTriggerSession(audio_session_t session);
+
 protected:
 
         enum routing_strategy {
@@ -202,6 +208,7 @@
             DEVICE_CATEGORY_HEADSET,
             DEVICE_CATEGORY_SPEAKER,
             DEVICE_CATEGORY_EARPIECE,
+            DEVICE_CATEGORY_EXT_MEDIA,
             DEVICE_CATEGORY_CNT
         };
 
@@ -408,6 +415,8 @@
         static const VolumeCurvePoint sDefaultVolumeCurve[AudioPolicyManager::VOLCNT];
         // default volume curve for media strategy
         static const VolumeCurvePoint sDefaultMediaVolumeCurve[AudioPolicyManager::VOLCNT];
+        // volume curve for non-media audio on ext media outputs (HDMI, Line, etc)
+        static const VolumeCurvePoint sExtMediaSystemVolumeCurve[AudioPolicyManager::VOLCNT];
         // volume curve for media strategy on speakers
         static const VolumeCurvePoint sSpeakerMediaVolumeCurve[AudioPolicyManager::VOLCNT];
         static const VolumeCurvePoint sSpeakerMediaVolumeCurveDrc[AudioPolicyManager::VOLCNT];
@@ -477,15 +486,18 @@
 
             status_t    dump(int fd);
 
-            audio_port_handle_t mId;
-            audio_io_handle_t mIoHandle;              // input handle
-            audio_devices_t mDevice;                    // current device this input is routed to
-            audio_patch_handle_t mPatchHandle;
-            uint32_t mRefCount;                         // number of AudioRecord clients using this output
-            uint32_t mOpenRefCount;
-            audio_source_t mInputSource;                // input source selected by application (mediarecorder.h)
-            const sp<IOProfile> mProfile;                  // I/O profile this output derives from
-            SortedVector<audio_session_t> mSessions;  // audio sessions attached to this input
+            audio_port_handle_t           mId;
+            audio_io_handle_t             mIoHandle;       // input handle
+            audio_devices_t               mDevice;         // current device this input is routed to
+            audio_patch_handle_t          mPatchHandle;
+            uint32_t                      mRefCount;       // number of AudioRecord clients using
+                                                           // this input
+            uint32_t                      mOpenRefCount;
+            audio_source_t                mInputSource;    // input source selected by application
+                                                           //(mediarecorder.h)
+            const sp<IOProfile>           mProfile;        // I/O profile this output derives from
+            SortedVector<audio_session_t> mSessions;       // audio sessions attached to this input
+            bool                          mIsSoundTrigger; // used by a soundtrigger capture
 
             virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                                    const struct audio_port_config *srcConfig = NULL) const;
@@ -569,6 +581,8 @@
         //    ignoreVirtualInputs is true.
         audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true);
 
+        uint32_t activeInputsCount() const;
+
         // initialize volume curves for each strategy and device category
         void initializeVolumeCurves();
 
@@ -713,6 +727,11 @@
         sp<AudioInputDescriptor> getInputFromId(audio_port_handle_t id) const;
         sp<HwModule> getModuleForDevice(audio_devices_t device) const;
         sp<HwModule> getModuleFromName(const char *name) const;
+        audio_devices_t availablePrimaryOutputDevices();
+        audio_devices_t availablePrimaryInputDevices();
+
+        void updateCallRouting(audio_devices_t rxDevice, int delayMs = 0);
+
         //
         // Audio policy configuration file parsing (audio_policy.conf)
         //
@@ -769,6 +788,11 @@
 
         DefaultKeyedVector<audio_patch_handle_t, sp<AudioPatch> > mAudioPatches;
 
+        DefaultKeyedVector<audio_session_t, audio_io_handle_t> mSoundTriggerSessions;
+
+        sp<AudioPatch> mCallTxPatch;
+        sp<AudioPatch> mCallRxPatch;
+
 #ifdef AUDIO_POLICY_TEST
         Mutex   mLock;
         Condition mWaitWorkCV;
diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h
index 97236e3..0044e7a 100644
--- a/services/audiopolicy/AudioPolicyService.h
+++ b/services/audiopolicy/AudioPolicyService.h
@@ -168,6 +168,12 @@
 
     virtual void registerClient(const sp<IAudioPolicyServiceClient>& client);
 
+    virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+                                           audio_io_handle_t *ioHandle,
+                                           audio_devices_t *device);
+
+    virtual status_t releaseSoundTriggerSession(audio_session_t session);
+
             status_t doStopOutput(audio_io_handle_t output,
                                   audio_stream_type_t stream,
                                   int session = 0);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 9721e13..046988e 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -810,7 +810,9 @@
             return res;
         }
     }
-    if (params.zslMode && !params.recordingHint) {
+
+    if (params.zslMode && !params.recordingHint &&
+            getRecordingStreamId() == NO_STREAM) {
         res = updateProcessorStream(mZslProcessor, params);
         if (res != OK) {
             ALOGE("%s: Camera %d: Unable to update ZSL stream: %s (%d)",
@@ -1033,6 +1035,36 @@
             return res;
         }
     }
+
+    if (mZslProcessor->getStreamId() != NO_STREAM) {
+        ALOGV("%s: Camera %d: Clearing out zsl stream before "
+                "creating recording stream", __FUNCTION__, mCameraId);
+        res = mStreamingProcessor->stopStream();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
+                    __FUNCTION__, mCameraId);
+            return res;
+        }
+        res = mDevice->waitUntilDrained();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+        }
+        res = mZslProcessor->clearZslQueue();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't clear zsl queue",
+                    __FUNCTION__, mCameraId);
+            return res;
+        }
+        res = mZslProcessor->deleteStream();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to delete zsl stream before "
+                    "record: %s (%d)", __FUNCTION__, mCameraId,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
     // Disable callbacks if they're enabled; can't record and use callbacks,
     // and we can't fail record start without stagefright asserting.
     params.previewCallbackFlags = 0;
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index 51eb845..572ae56 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -31,7 +31,8 @@
     libbinder \
     libcutils \
     libhardware \
-    libsoundtrigger
+    libsoundtrigger \
+    libmedia
 
 LOCAL_STATIC_LIBRARIES := \
     libserviceutility
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 3654136..2502e0d 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -25,12 +25,13 @@
 #include <system/sound_trigger.h>
 #include <cutils/atomic.h>
 #include <cutils/properties.h>
+#include <hardware/hardware.h>
+#include <media/AudioSystem.h>
 #include <utils/Errors.h>
 #include <utils/Log.h>
 #include <binder/IServiceManager.h>
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
-#include <hardware/hardware.h>
 #include <hardware/sound_trigger.h>
 #include <ServiceUtilities.h>
 #include "SoundTriggerHwService.h"
@@ -45,7 +46,9 @@
 
 SoundTriggerHwService::SoundTriggerHwService()
     : BnSoundTriggerHwService(),
-      mNextUniqueId(1)
+      mNextUniqueId(1),
+      mMemoryDealer(new MemoryDealer(1024 * 1024, "SoundTriggerHwService")),
+      mCaptureState(false)
 {
 }
 
@@ -143,15 +146,31 @@
     client->asBinder()->linkToDeath(module);
     moduleInterface = module;
 
+    module->setCaptureState_l(mCaptureState);
+
     return NO_ERROR;
 }
 
-void SoundTriggerHwService::detachModule(sp<Module> module) {
+status_t SoundTriggerHwService::setCaptureState(bool active)
+{
+    ALOGV("setCaptureState %d", active);
+    AutoMutex lock(mServiceLock);
+    mCaptureState = active;
+    for (size_t i = 0; i < mModules.size(); i++) {
+        mModules.valueAt(i)->setCaptureState_l(active);
+    }
+    return NO_ERROR;
+}
+
+
+void SoundTriggerHwService::detachModule(sp<Module> module)
+{
     ALOGV("detachModule");
     AutoMutex lock(mServiceLock);
     module->clearClient();
 }
 
+
 static const int kDumpLockRetries = 50;
 static const int kDumpLockSleep = 60000;
 
@@ -200,18 +219,175 @@
     if (module == NULL) {
         return;
     }
-    module->sendRecognitionEvent(event);
+    sp<SoundTriggerHwService> service = module->service().promote();
+    if (service == 0) {
+        return;
+    }
+
+    service->sendRecognitionEvent(event, module);
+}
+
+sp<IMemory> SoundTriggerHwService::prepareRecognitionEvent_l(
+                                                    struct sound_trigger_recognition_event *event)
+{
+    sp<IMemory> eventMemory;
+
+    //sanitize event
+    switch (event->type) {
+    case SOUND_MODEL_TYPE_KEYPHRASE:
+        ALOGW_IF(event->data_size != 0 && event->data_offset !=
+                    sizeof(struct sound_trigger_phrase_recognition_event),
+                    "prepareRecognitionEvent_l(): invalid data offset %u for keyphrase event type",
+                    event->data_offset);
+        event->data_offset = sizeof(struct sound_trigger_phrase_recognition_event);
+        break;
+    case SOUND_MODEL_TYPE_UNKNOWN:
+        ALOGW_IF(event->data_size != 0 && event->data_offset !=
+                    sizeof(struct sound_trigger_recognition_event),
+                    "prepareRecognitionEvent_l(): invalid data offset %u for unknown event type",
+                    event->data_offset);
+        event->data_offset = sizeof(struct sound_trigger_recognition_event);
+        break;
+    default:
+        return eventMemory;
+    }
+
+    size_t size = event->data_offset + event->data_size;
+    eventMemory = mMemoryDealer->allocate(size);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        eventMemory.clear();
+        return eventMemory;
+    }
+    memcpy(eventMemory->pointer(), event, size);
+
+    return eventMemory;
+}
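The event struct is copied verbatim starting at offset 0 of the shared memory, and any opaque payload follows at data_offset. A hypothetical receiver-side sketch (function name assumed) for a keyphrase event:

    void readKeyphraseEvent(const sp<IMemory>& eventMemory) {
        if (eventMemory == 0 || eventMemory->pointer() == NULL) {
            return;
        }
        const struct sound_trigger_phrase_recognition_event *ev =
                (const struct sound_trigger_phrase_recognition_event *)eventMemory->pointer();
        const uint8_t *payload =
                (const uint8_t *)eventMemory->pointer() + ev->common.data_offset;
        size_t payloadSize = ev->common.data_size;
        (void)payload; (void)payloadSize;   // consume the vendor-specific data here
    }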
+
+void SoundTriggerHwService::sendRecognitionEvent(struct sound_trigger_recognition_event *event,
+                                                 Module *module)
+{
+    AutoMutex lock(mServiceLock);
+    if (module == NULL) {
+        return;
+    }
+    sp<IMemory> eventMemory = prepareRecognitionEvent_l(event);
+    if (eventMemory == 0) {
+        return;
+    }
+    sp<Module> strongModule;
+    for (size_t i = 0; i < mModules.size(); i++) {
+        if (mModules.valueAt(i).get() == module) {
+            strongModule = mModules.valueAt(i);
+            break;
+        }
+    }
+    if (strongModule == 0) {
+        return;
+    }
+
+    sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
+                                          eventMemory, strongModule));
+}
+
+// static
+void SoundTriggerHwService::soundModelCallback(struct sound_trigger_model_event *event,
+                                               void *cookie)
+{
+    Module *module = (Module *)cookie;
+    if (module == NULL) {
+        return;
+    }
+    sp<SoundTriggerHwService> service = module->service().promote();
+    if (service == 0) {
+        return;
+    }
+
+    service->sendSoundModelEvent(event, module);
+}
+
+sp<IMemory> SoundTriggerHwService::prepareSoundModelEvent_l(struct sound_trigger_model_event *event)
+{
+    sp<IMemory> eventMemory;
+
+    size_t size = event->data_offset + event->data_size;
+    eventMemory = mMemoryDealer->allocate(size);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        eventMemory.clear();
+        return eventMemory;
+    }
+    memcpy(eventMemory->pointer(), event, size);
+
+    return eventMemory;
+}
+
+void SoundTriggerHwService::sendSoundModelEvent(struct sound_trigger_model_event *event,
+                                                Module *module)
+{
+    AutoMutex lock(mServiceLock);
+    sp<IMemory> eventMemory = prepareSoundModelEvent_l(event);
+    if (eventMemory == 0) {
+        return;
+    }
+    sp<Module> strongModule;
+    for (size_t i = 0; i < mModules.size(); i++) {
+        if (mModules.valueAt(i).get() == module) {
+            strongModule = mModules.valueAt(i);
+            break;
+        }
+    }
+    if (strongModule == 0) {
+        return;
+    }
+    sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_SOUNDMODEL,
+                                                 eventMemory, strongModule));
 }
 
 
-void SoundTriggerHwService::sendRecognitionEvent(const sp<RecognitionEvent>& event)
+sp<IMemory> SoundTriggerHwService::prepareServiceStateEvent_l(sound_trigger_service_state_t state)
 {
-    mCallbackThread->sendRecognitionEvent(event);
+    sp<IMemory> eventMemory;
+
+    size_t size = sizeof(sound_trigger_service_state_t);
+    eventMemory = mMemoryDealer->allocate(size);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        eventMemory.clear();
+        return eventMemory;
+    }
+    *((sound_trigger_service_state_t *)eventMemory->pointer()) = state;
+    return eventMemory;
 }
 
-void SoundTriggerHwService::onRecognitionEvent(const sp<RecognitionEvent>& event)
+// call with mServiceLock held
+void SoundTriggerHwService::sendServiceStateEvent_l(sound_trigger_service_state_t state,
+                                                  Module *module)
 {
-    ALOGV("onRecognitionEvent");
+    sp<IMemory> eventMemory = prepareServiceStateEvent_l(state);
+    if (eventMemory == 0) {
+        return;
+    }
+    sp<Module> strongModule;
+    for (size_t i = 0; i < mModules.size(); i++) {
+        if (mModules.valueAt(i).get() == module) {
+            strongModule = mModules.valueAt(i);
+            break;
+        }
+    }
+    if (strongModule == 0) {
+        return;
+    }
+    sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
+                                                 eventMemory, strongModule));
+}
+
+// call with mServiceLock held
+void SoundTriggerHwService::sendCallbackEvent_l(const sp<CallbackEvent>& event)
+{
+    mCallbackThread->sendCallbackEvent(event);
+}
+
+void SoundTriggerHwService::onCallbackEvent(const sp<CallbackEvent>& event)
+{
+    ALOGV("onCallbackEvent");
     sp<Module> module;
     {
         AutoMutex lock(mServiceLock);
@@ -220,15 +396,12 @@
             return;
         }
     }
-    module->onRecognitionEvent(event->mEventMemory);
-}
-
-// static
-void SoundTriggerHwService::soundModelCallback(struct sound_trigger_model_event *event __unused,
-                                               void *cookie)
-{
-    Module *module = (Module *)cookie;
-
+    module->onCallbackEvent(event);
+    {
+        AutoMutex lock(mServiceLock);
+        // clear the event memory now so that it is released while mServiceLock is held
+        event->mMemory.clear();
+    }
 }
 
 #undef LOG_TAG
@@ -241,7 +414,10 @@
 
 SoundTriggerHwService::CallbackThread::~CallbackThread()
 {
-    mEventQueue.clear();
+    while (!mEventQueue.isEmpty()) {
+        mEventQueue[0]->mMemory.clear();
+        mEventQueue.removeAt(0);
+    }
 }
 
 void SoundTriggerHwService::CallbackThread::onFirstRef()
@@ -252,7 +428,7 @@
 bool SoundTriggerHwService::CallbackThread::threadLoop()
 {
     while (!exitPending()) {
-        sp<RecognitionEvent> event;
+        sp<CallbackEvent> event;
         sp<SoundTriggerHwService> service;
         {
             Mutex::Autolock _l(mCallbackLock);
@@ -269,7 +445,7 @@
             service = mService.promote();
         }
         if (service != 0) {
-            service->onRecognitionEvent(event);
+            service->onCallbackEvent(event);
         }
     }
     return false;
@@ -282,25 +458,25 @@
     mCallbackCond.broadcast();
 }
 
-void SoundTriggerHwService::CallbackThread::sendRecognitionEvent(
-                        const sp<SoundTriggerHwService::RecognitionEvent>& event)
+void SoundTriggerHwService::CallbackThread::sendCallbackEvent(
+                        const sp<SoundTriggerHwService::CallbackEvent>& event)
 {
     AutoMutex lock(mCallbackLock);
     mEventQueue.add(event);
     mCallbackCond.signal();
 }
 
-SoundTriggerHwService::RecognitionEvent::RecognitionEvent(
-                                            sp<IMemory> eventMemory,
-                                            wp<Module> module)
-    : mEventMemory(eventMemory), mModule(module)
+SoundTriggerHwService::CallbackEvent::CallbackEvent(event_type type, sp<IMemory> memory,
+                                                    wp<Module> module)
+    : mType(type), mMemory(memory), mModule(module)
 {
 }
 
-SoundTriggerHwService::RecognitionEvent::~RecognitionEvent()
+SoundTriggerHwService::CallbackEvent::~CallbackEvent()
 {
 }
 
+
 #undef LOG_TAG
 #define LOG_TAG "SoundTriggerHwService::Module"
 
@@ -309,7 +485,7 @@
                                       sound_trigger_module_descriptor descriptor,
                                       const sp<ISoundTriggerClient>& client)
  : mService(service), mHwDevice(hwDevice), mDescriptor(descriptor),
-   mClient(client)
+   mClient(client), mServiceState(SOUND_TRIGGER_STATE_NO_INIT)
 {
 }
 
@@ -328,7 +504,6 @@
             ALOGV("detach() unloading model %d", model->mHandle);
             if (model->mState == Model::STATE_ACTIVE) {
                 mHwDevice->stop_recognition(mHwDevice, model->mHandle);
-                model->deallocateMemory();
             }
             mHwDevice->unload_sound_model(mHwDevice, model->mHandle);
         }
@@ -365,9 +540,20 @@
                                                   SoundTriggerHwService::soundModelCallback,
                                                   this,
                                                   handle);
-    if (status == NO_ERROR) {
-        mModels.replaceValueFor(*handle, new Model(*handle));
+    if (status != NO_ERROR) {
+        return status;
     }
+    audio_session_t session;
+    audio_io_handle_t ioHandle;
+    audio_devices_t device;
+
+    status = AudioSystem::acquireSoundTriggerSession(&session, &ioHandle, &device);
+    if (status != NO_ERROR) {
+        return status;
+    }
+
+    sp<Model> model = new Model(*handle, session, ioHandle, device, sound_model->type);
+    mModels.replaceValueFor(*handle, model);
 
     return status;
 }
@@ -388,8 +574,8 @@
     mModels.removeItem(handle);
     if (model->mState == Model::STATE_ACTIVE) {
         mHwDevice->stop_recognition(mHwDevice, model->mHandle);
-        model->deallocateMemory();
     }
+    AudioSystem::releaseSoundTriggerSession(model->mCaptureSession);
     return mHwDevice->unload_sound_model(mHwDevice, handle);
 }
 
@@ -407,6 +593,9 @@
 
     }
     AutoMutex lock(mLock);
+    if (mServiceState == SOUND_TRIGGER_STATE_DISABLED) {
+        return INVALID_OPERATION;
+    }
     sp<Model> model = getModel(handle);
     if (model == 0) {
         return BAD_VALUE;
@@ -419,17 +608,23 @@
     if (model->mState == Model::STATE_ACTIVE) {
         return INVALID_OPERATION;
     }
-    model->mState = Model::STATE_ACTIVE;
 
     struct sound_trigger_recognition_config *config =
             (struct sound_trigger_recognition_config *)dataMemory->pointer();
 
     //TODO: get capture handle and device from audio policy service
-    config->capture_handle = AUDIO_IO_HANDLE_NONE;
-    config->capture_device = AUDIO_DEVICE_NONE;
-    return mHwDevice->start_recognition(mHwDevice, handle, config,
+    config->capture_handle = model->mCaptureIOHandle;
+    config->capture_device = model->mCaptureDevice;
+    status_t status = mHwDevice->start_recognition(mHwDevice, handle, config,
                                         SoundTriggerHwService::recognitionCallback,
                                         this);
+
+    if (status == NO_ERROR) {
+        model->mState = Model::STATE_ACTIVE;
+        model->mConfig = *config;
+    }
+
+    return status;
 }
 
 status_t SoundTriggerHwService::Module::stopRecognition(sound_model_handle_t handle)
@@ -449,93 +644,62 @@
         return INVALID_OPERATION;
     }
     mHwDevice->stop_recognition(mHwDevice, handle);
-    model->deallocateMemory();
     model->mState = Model::STATE_IDLE;
     return NO_ERROR;
 }
 
-void SoundTriggerHwService::Module::sendRecognitionEvent(
-                                                    struct sound_trigger_recognition_event *event)
+
+void SoundTriggerHwService::Module::onCallbackEvent(const sp<CallbackEvent>& event)
 {
-    sp<SoundTriggerHwService> service;
-    sp<IMemory> eventMemory;
-    ALOGV("sendRecognitionEvent for model %d", event->model);
-    {
-        AutoMutex lock(mLock);
-        sp<Model> model = getModel(event->model);
-        if (model == 0) {
-            return;
-        }
-        if (model->mState != Model::STATE_ACTIVE) {
-            ALOGV("sendRecognitionEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
-            return;
-        }
-        if (mClient == 0) {
-            return;
-        }
-        service = mService.promote();
-        if (service == 0) {
-            return;
-        }
-
-        //sanitize event
-        switch (event->type) {
-        case SOUND_MODEL_TYPE_KEYPHRASE:
-            ALOGW_IF(event->data_offset !=
-                        sizeof(struct sound_trigger_phrase_recognition_event),
-                        "sendRecognitionEvent(): invalid data offset %u for keyphrase event type",
-                        event->data_offset);
-            event->data_offset = sizeof(struct sound_trigger_phrase_recognition_event);
-            break;
-        case SOUND_MODEL_TYPE_UNKNOWN:
-            ALOGW_IF(event->data_offset !=
-                        sizeof(struct sound_trigger_recognition_event),
-                        "sendRecognitionEvent(): invalid data offset %u for unknown event type",
-                        event->data_offset);
-            event->data_offset = sizeof(struct sound_trigger_recognition_event);
-            break;
-        default:
-                return;
-        }
-
-        size_t size = event->data_offset + event->data_size;
-        eventMemory = model->allocateMemory(size);
-        if (eventMemory == 0 || eventMemory->pointer() == NULL) {
-            return;
-        }
-        memcpy(eventMemory->pointer(), event, size);
-    }
-    service->sendRecognitionEvent(new RecognitionEvent(eventMemory, this));
-}
-
-void SoundTriggerHwService::Module::onRecognitionEvent(sp<IMemory> eventMemory)
-{
-    ALOGV("Module::onRecognitionEvent");
+    ALOGV("onCallbackEvent type %d", event->mType);
 
     AutoMutex lock(mLock);
+    sp<IMemory> eventMemory = event->mMemory;
 
     if (eventMemory == 0 || eventMemory->pointer() == NULL) {
         return;
     }
-    struct sound_trigger_recognition_event *event =
-            (struct sound_trigger_recognition_event *)eventMemory->pointer();
-
-    sp<Model> model = getModel(event->model);
-    if (model == 0) {
-        ALOGI("%s model == 0", __func__);
-        return;
-    }
-    if (model->mState != Model::STATE_ACTIVE) {
-        ALOGV("onRecognitionEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
-        return;
-    }
     if (mClient == 0) {
         ALOGI("%s mClient == 0", __func__);
         return;
     }
-    mClient->onRecognitionEvent(eventMemory);
-    model->mState = Model::STATE_IDLE;
-    model->deallocateMemory();
+
+    switch (event->mType) {
+    case CallbackEvent::TYPE_RECOGNITION: {
+        struct sound_trigger_recognition_event *recognitionEvent =
+                (struct sound_trigger_recognition_event *)eventMemory->pointer();
+
+        sp<Model> model = getModel(recognitionEvent->model);
+        if (model == 0) {
+            ALOGW("%s model == 0", __func__);
+            return;
+        }
+        if (model->mState != Model::STATE_ACTIVE) {
+            ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
+            return;
+        }
+
+        recognitionEvent->capture_session = model->mCaptureSession;
+        mClient->onRecognitionEvent(eventMemory);
+        model->mState = Model::STATE_IDLE;
+    } break;
+    case CallbackEvent::TYPE_SOUNDMODEL: {
+        struct sound_trigger_model_event *soundmodelEvent =
+                (struct sound_trigger_model_event *)eventMemory->pointer();
+
+        sp<Model> model = getModel(soundmodelEvent->model);
+        if (model == 0) {
+            ALOGW("%s model == 0", __func__);
+            return;
+        }
+        mClient->onSoundModelEvent(eventMemory);
+    } break;
+    case CallbackEvent::TYPE_SERVICE_STATE: {
+        mClient->onServiceStateChange(eventMemory);
+    } break;
+    default:
+        LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);
+    }
 }
 
 sp<SoundTriggerHwService::Model> SoundTriggerHwService::Module::getModel(
@@ -555,30 +719,80 @@
     detach();
 }
 
-
-SoundTriggerHwService::Model::Model(sound_model_handle_t handle) :
-    mHandle(handle), mState(STATE_IDLE), mInputHandle(AUDIO_IO_HANDLE_NONE),
-    mCaptureSession(AUDIO_SESSION_ALLOCATE),
-    mMemoryDealer(new MemoryDealer(sizeof(struct sound_trigger_recognition_event),
-                                   "SoundTriggerHwService::Event"))
+// Called with mServiceLock held
+void SoundTriggerHwService::Module::setCaptureState_l(bool active)
 {
+    ALOGV("Module::setCaptureState_l %d", active);
+    sp<SoundTriggerHwService> service;
+    sound_trigger_service_state_t state;
 
-}
+    Vector< sp<IMemory> > events;
+    {
+        AutoMutex lock(mLock);
+        state = (active && !mDescriptor.properties.concurrent_capture) ?
+                                        SOUND_TRIGGER_STATE_DISABLED : SOUND_TRIGGER_STATE_ENABLED;
 
+        if (state == mServiceState) {
+            return;
+        }
 
-sp<IMemory> SoundTriggerHwService::Model::allocateMemory(size_t size)
-{
-    sp<IMemory> memory;
-    if (mMemoryDealer->getMemoryHeap()->getSize() < size) {
-        mMemoryDealer = new MemoryDealer(size, "SoundTriggerHwService::Event");
+        mServiceState = state;
+
+        service = mService.promote();
+        if (service == 0) {
+            return;
+        }
+
+        if (state == SOUND_TRIGGER_STATE_ENABLED) {
+            goto exit;
+        }
+
+        for (size_t i = 0; i < mModels.size(); i++) {
+            sp<Model> model = mModels.valueAt(i);
+            if (model->mState == Model::STATE_ACTIVE) {
+                mHwDevice->stop_recognition(mHwDevice, model->mHandle);
+                // keep model in ACTIVE state so that event is processed by onCallbackEvent()
+                struct sound_trigger_phrase_recognition_event phraseEvent;
+                switch (model->mType) {
+                case SOUND_MODEL_TYPE_KEYPHRASE:
+                    phraseEvent.num_phrases = model->mConfig.num_phrases;
+                    for (size_t i = 0; i < phraseEvent.num_phrases; i++) {
+                        phraseEvent.phrase_extras[i] = model->mConfig.phrases[i];
+                    }
+                    break;
+                case SOUND_MODEL_TYPE_UNKNOWN:
+                default:
+                    break;
+                }
+                phraseEvent.common.status = RECOGNITION_STATUS_ABORT;
+                phraseEvent.common.type = model->mType;
+                phraseEvent.common.model = model->mHandle;
+                phraseEvent.common.data_size = 0;
+                sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&phraseEvent.common);
+                if (eventMemory != 0) {
+                    events.add(eventMemory);
+                }
+            }
+        }
     }
-    memory = mMemoryDealer->allocate(size);
-    return memory;
+
+    for (size_t i = 0; i < events.size(); i++) {
+        service->sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_RECOGNITION, events[i],
+                                                     this));
+    }
+
+exit:
+    service->sendServiceStateEvent_l(state, this);
 }
 
-void SoundTriggerHwService::Model::deallocateMemory()
+
+SoundTriggerHwService::Model::Model(sound_model_handle_t handle, audio_session_t session,
+                                    audio_io_handle_t ioHandle, audio_devices_t device,
+                                    sound_trigger_sound_model_type_t type) :
+    mHandle(handle), mState(STATE_IDLE), mCaptureSession(session),
+    mCaptureIOHandle(ioHandle), mCaptureDevice(device), mType(type)
 {
-    mMemoryDealer->deallocate(0);
+
 }
 
 status_t SoundTriggerHwService::Module::dump(int fd __unused,
diff --git a/services/soundtrigger/SoundTriggerHwService.h b/services/soundtrigger/SoundTriggerHwService.h
index 377f2a1..d05dacd 100644
--- a/services/soundtrigger/SoundTriggerHwService.h
+++ b/services/soundtrigger/SoundTriggerHwService.h
@@ -53,6 +53,8 @@
                             const sp<ISoundTriggerClient>& client,
                             sp<ISoundTrigger>& module);
 
+    virtual status_t setCaptureState(bool active);
+
     virtual status_t    onTransact(uint32_t code, const Parcel& data,
                                    Parcel* reply, uint32_t flags);
 
@@ -66,17 +68,33 @@
             STATE_ACTIVE
         };
 
-        Model(sound_model_handle_t handle);
+        Model(sound_model_handle_t handle, audio_session_t session, audio_io_handle_t ioHandle,
+              audio_devices_t device, sound_trigger_sound_model_type_t type);
         ~Model() {}
 
-        sp<IMemory> allocateMemory(size_t size);
-        void deallocateMemory();
-
         sound_model_handle_t    mHandle;
         int                     mState;
-        audio_io_handle_t       mInputHandle;
         audio_session_t         mCaptureSession;
-        sp<MemoryDealer>        mMemoryDealer;
+        audio_io_handle_t       mCaptureIOHandle;
+        audio_devices_t         mCaptureDevice;
+        sound_trigger_sound_model_type_t mType;
+        struct sound_trigger_recognition_config mConfig;
+    };
+
+    class CallbackEvent : public RefBase {
+    public:
+        typedef enum {
+            TYPE_RECOGNITION,
+            TYPE_SOUNDMODEL,
+            TYPE_SERVICE_STATE,
+        } event_type;
+        CallbackEvent(event_type type, sp<IMemory> memory, wp<Module> module);
+
+        virtual             ~CallbackEvent();
+
+        event_type mType;
+        sp<IMemory> mMemory;
+        wp<Module> mModule;
     };
 
     class Module : public virtual RefBase,
@@ -109,36 +127,29 @@
        struct sound_trigger_module_descriptor descriptor() { return mDescriptor; }
        void setClient(sp<ISoundTriggerClient> client) { mClient = client; }
        void clearClient() { mClient.clear(); }
-       sp<ISoundTriggerClient> client() { return mClient; }
+       sp<ISoundTriggerClient> client() const { return mClient; }
+       wp<SoundTriggerHwService> service() const { return mService; }
 
-       void sendRecognitionEvent(struct sound_trigger_recognition_event *event);
-       void onRecognitionEvent(sp<IMemory> eventMemory);
+       void onCallbackEvent(const sp<CallbackEvent>& event);
 
        sp<Model> getModel(sound_model_handle_t handle);
 
+       void setCaptureState_l(bool active);
+
        // IBinder::DeathRecipient implementation
        virtual void        binderDied(const wp<IBinder> &who);
 
     private:
+
         Mutex                                  mLock;
         wp<SoundTriggerHwService>              mService;
         struct sound_trigger_hw_device*        mHwDevice;
         struct sound_trigger_module_descriptor mDescriptor;
         sp<ISoundTriggerClient>                mClient;
         DefaultKeyedVector< sound_model_handle_t, sp<Model> >     mModels;
+        sound_trigger_service_state_t          mServiceState;
     }; // class Module
 
-    class RecognitionEvent : public RefBase {
-    public:
-
-        RecognitionEvent(sp<IMemory> eventMemory, wp<Module> module);
-
-        virtual             ~RecognitionEvent();
-
-        sp<IMemory> mEventMemory;
-        wp<Module> mModule;
-    };
-
     class CallbackThread : public Thread {
     public:
 
@@ -153,22 +164,30 @@
         virtual void        onFirstRef();
 
                 void        exit();
-                void        sendRecognitionEvent(const sp<RecognitionEvent>& event);
+                void        sendCallbackEvent(const sp<CallbackEvent>& event);
 
     private:
         wp<SoundTriggerHwService>   mService;
         Condition                   mCallbackCond;
         Mutex                       mCallbackLock;
-        Vector< sp<RecognitionEvent> > mEventQueue;
+        Vector< sp<CallbackEvent> > mEventQueue;
     };
 
-    void detachModule(sp<Module> module);
+           void detachModule(sp<Module> module);
 
     static void recognitionCallback(struct sound_trigger_recognition_event *event, void *cookie);
-    void sendRecognitionEvent(const sp<RecognitionEvent>& event);
-    void onRecognitionEvent(const sp<RecognitionEvent>& event);
+           sp<IMemory> prepareRecognitionEvent_l(struct sound_trigger_recognition_event *event);
+           void sendRecognitionEvent(struct sound_trigger_recognition_event *event, Module *module);
 
     static void soundModelCallback(struct sound_trigger_model_event *event, void *cookie);
+           sp<IMemory> prepareSoundModelEvent_l(struct sound_trigger_model_event *event);
+           void sendSoundModelEvent(struct sound_trigger_model_event *event, Module *module);
+
+           sp<IMemory> prepareServiceStateEvent_l(sound_trigger_service_state_t state);
+           void sendServiceStateEvent_l(sound_trigger_service_state_t state, Module *module);
+
+           void sendCallbackEvent_l(const sp<CallbackEvent>& event);
+           void onCallbackEvent(const sp<CallbackEvent>& event);
 
 private:
 
@@ -178,6 +197,8 @@
     volatile int32_t    mNextUniqueId;
     DefaultKeyedVector< sound_trigger_module_handle_t, sp<Module> >     mModules;
     sp<CallbackThread>  mCallbackThread;
+    sp<MemoryDealer>    mMemoryDealer;
+    bool                mCaptureState;
 };
 
 } // namespace android
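
The header changes above replace the recognition-only RecognitionEvent with a generic, type-tagged CallbackEvent and add a shared MemoryDealer plus a per-module service state. A minimal sketch (not code from this patch) of how such a tagged event could be allocated and queued, using only the members declared above; sizing, locking, and error handling are simplified:

    // Sketch only: copy a sound model event into shared memory and hand it to
    // the CallbackThread, tagged with its type so the thread can route it to
    // the right ISoundTriggerClient callback. Locking is omitted here.
    void SoundTriggerHwService::sendSoundModelEvent(struct sound_trigger_model_event *event,
                                                    Module *module)
    {
        sp<IMemory> eventMemory = mMemoryDealer->allocate(sizeof(*event));
        if (eventMemory == 0 || eventMemory->pointer() == NULL) {
            return;  // drop the event if shared memory allocation fails
        }
        memcpy(eventMemory->pointer(), event, sizeof(*event));

        // CallbackEvent carries the type tag, the payload, and the originating module.
        mCallbackThread->sendCallbackEvent(
                new CallbackEvent(CallbackEvent::TYPE_SOUNDMODEL, eventMemory, module));
    }
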
diff --git a/soundtrigger/ISoundTriggerClient.cpp b/soundtrigger/ISoundTriggerClient.cpp
index 1d0c0ec..b0b4428 100644
--- a/soundtrigger/ISoundTriggerClient.cpp
+++ b/soundtrigger/ISoundTriggerClient.cpp
@@ -27,6 +27,8 @@
 
 enum {
     ON_RECOGNITION_EVENT = IBinder::FIRST_CALL_TRANSACTION,
+    ON_SOUNDMODEL_EVENT,
+    ON_SERVICE_STATE_CHANGE
 };
 
 class BpSoundTriggerClient: public BpInterface<ISoundTriggerClient>
@@ -47,6 +49,25 @@
                            data,
                            &reply);
     }
+
+    virtual void onSoundModelEvent(const sp<IMemory>& eventMemory)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ISoundTriggerClient::getInterfaceDescriptor());
+        data.writeStrongBinder(eventMemory->asBinder());
+        remote()->transact(ON_SOUNDMODEL_EVENT,
+                           data,
+                           &reply);
+    }
+    virtual void onServiceStateChange(const sp<IMemory>& eventMemory)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ISoundTriggerClient::getInterfaceDescriptor());
+        data.writeStrongBinder(eventMemory->asBinder());
+        remote()->transact(ON_SERVICE_STATE_CHANGE,
+                           data,
+                           &reply);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(SoundTriggerClient,
@@ -65,6 +86,20 @@
             onRecognitionEvent(eventMemory);
             return NO_ERROR;
         } break;
+        case ON_SOUNDMODEL_EVENT: {
+            CHECK_INTERFACE(ISoundTriggerClient, data, reply);
+            sp<IMemory> eventMemory = interface_cast<IMemory>(
+                data.readStrongBinder());
+            onSoundModelEvent(eventMemory);
+            return NO_ERROR;
+        } break;
+        case ON_SERVICE_STATE_CHANGE: {
+            CHECK_INTERFACE(ISoundTriggerClient, data, reply);
+            sp<IMemory> eventMemory = interface_cast<IMemory>(
+                data.readStrongBinder());
+            onServiceStateChange(eventMemory);
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/soundtrigger/ISoundTriggerHwService.cpp b/soundtrigger/ISoundTriggerHwService.cpp
index c9a0c24..05728e9 100644
--- a/soundtrigger/ISoundTriggerHwService.cpp
+++ b/soundtrigger/ISoundTriggerHwService.cpp
@@ -37,6 +37,7 @@
 enum {
     LIST_MODULES = IBinder::FIRST_CALL_TRANSACTION,
     ATTACH,
+    SET_CAPTURE_STATE,
 };
 
 class BpSoundTriggerHwService: public BpInterface<ISoundTriggerHwService>
@@ -90,6 +91,18 @@
         return status;
     }
 
+    virtual status_t setCaptureState(bool active)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ISoundTriggerHwService::getInterfaceDescriptor());
+        data.writeInt32(active);
+        status_t status = remote()->transact(SET_CAPTURE_STATE, data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        }
+        return status;
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(SoundTriggerHwService, "android.hardware.ISoundTriggerHwService");
@@ -140,6 +153,13 @@
             }
             return NO_ERROR;
         } break;
+
+        case SET_CAPTURE_STATE: {
+            CHECK_INTERFACE(ISoundTriggerHwService, data, reply);
+            reply->writeInt32(setCaptureState((bool)data.readInt32()));
+            return NO_ERROR;
+        } break;
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
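
SET_CAPTURE_STATE is a plain status-returning call: the proxy writes the flag as an int32 and reads the status back from the reply. A hedged sketch of the kind of call site this enables, assuming the static SoundTrigger::setCaptureState() helper implemented in SoundTrigger.cpp below; the actual caller in the audio framework is not part of this patch, and the function name here is illustrative:

    // Sketch only: notify the sound trigger service when audio capture starts
    // or stops, so it can adjust recognition accordingly.
    void notifyCaptureStateToSoundTrigger(bool captureActive)
    {
        status_t status = SoundTrigger::setCaptureState(captureActive);
        if (status != NO_ERROR && status != NO_INIT) {
            ALOGW("setCaptureState(%d) failed with status %d", captureActive, status);
        }
    }
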
diff --git a/soundtrigger/SoundTrigger.cpp b/soundtrigger/SoundTrigger.cpp
index e43acd0..0015c30 100644
--- a/soundtrigger/SoundTrigger.cpp
+++ b/soundtrigger/SoundTrigger.cpp
@@ -113,6 +113,16 @@
 }
 
 
+status_t SoundTrigger::setCaptureState(bool active)
+{
+    ALOGV("setCaptureState(%d)", active);
+    const sp<ISoundTriggerHwService>& service = getSoundTriggerHwService();
+    if (service == 0) {
+        return NO_INIT;
+    }
+    return service->setCaptureState(active);
+}
+
 // SoundTrigger
 SoundTrigger::SoundTrigger(sound_trigger_module_handle_t module,
                                  const sp<SoundTriggerCallback>& callback)
@@ -192,6 +202,31 @@
     }
 }
 
+void SoundTrigger::onSoundModelEvent(const sp<IMemory>& eventMemory)
+{
+    Mutex::Autolock _l(mLock);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        return;
+    }
+
+    if (mCallback != 0) {
+        mCallback->onSoundModelEvent(
+                (struct sound_trigger_model_event *)eventMemory->pointer());
+    }
+}
+
+void SoundTrigger::onServiceStateChange(const sp<IMemory>& eventMemory)
+{
+    Mutex::Autolock _l(mLock);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        return;
+    }
+
+    if (mCallback != 0) {
+        mCallback->onServiceStateChange(
+                *((sound_trigger_service_state_t *)eventMemory->pointer()));
+    }
+}
 
 //IBinder::DeathRecipient
 void SoundTrigger::binderDied(const wp<IBinder>& who __unused) {
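
With these additions, SoundTrigger unwraps each IMemory payload and forwards typed data to its SoundTriggerCallback, so a consumer only deals with plain structs and enums. A hedged sketch of such a callback; the method names follow the calls made above, onServiceDied is assumed from the binder death path, and the status field access assumes the layout in sound_trigger.h:

    // Sketch only: a callback consumer matching the calls SoundTrigger makes above.
    class ExampleCallback : public SoundTriggerCallback {
    public:
        virtual void onRecognitionEvent(struct sound_trigger_recognition_event *event) {
            ALOGV("recognition event, status %d", event->status);
        }
        virtual void onSoundModelEvent(struct sound_trigger_model_event *event) {
            ALOGV("sound model event, status %d", event->status);
        }
        virtual void onServiceStateChange(sound_trigger_service_state_t state) {
            ALOGV("service state is now %d", state);
        }
        virtual void onServiceDied() {
            // the binder to the sound trigger service went away; re-attach if needed
        }
    };
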