am 2f702c9b: am 43e10bb7: Merge "Move to libc++."

* commit '2f702c9bc0d07f5d48f6f50586b1e11d9ac59381':
  Move to libc++.
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index 0dda6b6..dce313a 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -206,7 +206,7 @@
         return res;
     }
 
-    size_t sectionCount;
+    size_t sectionCount = 0;
     if (tagCount > 0) {
         if ((res = parcel->readInt32(reinterpret_cast<int32_t*>(&sectionCount))) != OK) {
             ALOGE("%s: could not read section count for.", __FUNCTION__);
diff --git a/cmds/screenrecord/FrameOutput.cpp b/cmds/screenrecord/FrameOutput.cpp
index 03e0062..bef74f5 100644
--- a/cmds/screenrecord/FrameOutput.cpp
+++ b/cmds/screenrecord/FrameOutput.cpp
@@ -206,7 +206,7 @@
 }
 
 // Callback; executes on arbitrary thread.
-void FrameOutput::onFrameAvailable() {
+void FrameOutput::onFrameAvailable(const BufferItem& /* item */) {
     Mutex::Autolock _l(mMutex);
     mFrameAvailable = true;
     mEventCond.signal();
diff --git a/cmds/screenrecord/FrameOutput.h b/cmds/screenrecord/FrameOutput.h
index c49ec3b..4c0c3be 100644
--- a/cmds/screenrecord/FrameOutput.h
+++ b/cmds/screenrecord/FrameOutput.h
@@ -62,7 +62,7 @@
     }
 
     // (overrides GLConsumer::FrameAvailableListener method)
-    virtual void onFrameAvailable();
+    virtual void onFrameAvailable(const BufferItem& item);
 
     // Reduces RGBA to RGB, in place.
     static void reduceRgbaToRgb(uint8_t* buf, unsigned int pixelCount);
diff --git a/cmds/screenrecord/Overlay.cpp b/cmds/screenrecord/Overlay.cpp
index 7fef53d..c659170 100644
--- a/cmds/screenrecord/Overlay.cpp
+++ b/cmds/screenrecord/Overlay.cpp
@@ -274,7 +274,7 @@
 }
 
 // Callback; executes on arbitrary thread.
-void Overlay::onFrameAvailable() {
+void Overlay::onFrameAvailable(const BufferItem& /* item */) {
     ALOGV("Overlay::onFrameAvailable");
     Mutex::Autolock _l(mMutex);
     mFrameAvailable = true;
diff --git a/cmds/screenrecord/Overlay.h b/cmds/screenrecord/Overlay.h
index b1b5c29..ee3444d 100644
--- a/cmds/screenrecord/Overlay.h
+++ b/cmds/screenrecord/Overlay.h
@@ -78,7 +78,7 @@
             const Program& texRender, TextRenderer& textRenderer);
 
     // (overrides GLConsumer::FrameAvailableListener method)
-    virtual void onFrameAvailable();
+    virtual void onFrameAvailable(const BufferItem& item);
 
     // (overrides Thread method)
     virtual bool threadLoop();
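
FrameOutput and Overlay only use the callback as a wakeup, so they can ignore the BufferItem argument that the updated FrameAvailableListener interface now passes (ProCamera and SurfaceMediaSource further down make the same signature change). A standard-library analogue of that wakeup pattern, sketched with assumed names rather than the AOSP Mutex/Condition types:

    #include <condition_variable>
    #include <mutex>

    struct FrameSignal {
        std::mutex mutex;
        std::condition_variable cond;
        bool frameAvailable = false;

        // Called from an arbitrary thread, mirroring onFrameAvailable() above;
        // any per-frame payload (the BufferItem) can simply be ignored here.
        void onFrameAvailable() {
            std::lock_guard<std::mutex> lock(mutex);
            frameAvailable = true;
            cond.notify_one();
        }

        // Called from the consuming thread.
        void waitForFrame() {
            std::unique_lock<std::mutex> lock(mutex);
            cond.wait(lock, [this] { return frameAvailable; });
            frameAvailable = false;
        }
    };
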
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index 27df9cd..6139f1f 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -113,11 +113,21 @@
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
 
+    virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
+        UNUSED(ssid);
+        UNUSED(secureStop);
+        return android::ERROR_DRM_CANNOT_HANDLE;
+    }
+
     virtual status_t releaseSecureStops(const Vector<uint8_t>& ssRelease) {
         UNUSED(ssRelease);
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
 
+    virtual status_t releaseAllSecureStops() {
+        return android::ERROR_DRM_CANNOT_HANDLE;
+    }
+
     virtual status_t getPropertyString(
             const String8& name, String8& value) const;
 
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
index 2ea554b..7eac0a1 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
@@ -305,6 +305,24 @@
         return OK;
     }
 
+    status_t MockDrmPlugin::getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop)
+    {
+        Mutex::Autolock lock(mLock);
+        ALOGD("MockDrmPlugin::getSecureStop()");
+
+        // Properties used in the mock test, set by the CTS test app and returned from the mock plugin:
+        //   byte[] mock-secure-stop  -> first secure stop in list
+
+        ssize_t index = mByteArrayProperties.indexOfKey(String8("mock-secure-stop"));
+        if (index < 0) {
+            ALOGD("Missing 'mock-secure-stop' parameter for mock");
+            return BAD_VALUE;
+        } else {
+            secureStop = mByteArrayProperties.valueAt(index);
+        }
+        return OK;
+    }
+
     status_t MockDrmPlugin::getSecureStops(List<Vector<uint8_t> > &secureStops)
     {
         Mutex::Autolock lock(mLock);
@@ -349,6 +367,13 @@
         return OK;
     }
 
+    status_t MockDrmPlugin::releaseAllSecureStops()
+    {
+        Mutex::Autolock lock(mLock);
+        ALOGD("MockDrmPlugin::releaseAllSecureStops()");
+        return OK;
+    }
+
     status_t MockDrmPlugin::getPropertyString(String8 const &name, String8 &value) const
     {
         ALOGD("MockDrmPlugin::getPropertyString(name=%s)", name.string());
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
index 4b63299..d1d8058 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
@@ -88,7 +88,9 @@
         status_t unprovisionDevice();
 
         status_t getSecureStops(List<Vector<uint8_t> > &secureStops);
+        status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
         status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
+        status_t releaseAllSecureStops();
 
         status_t getPropertyString(String8 const &name, String8 &value ) const;
         status_t getPropertyByteArray(String8 const &name,
diff --git a/include/camera/ProCamera.h b/include/camera/ProCamera.h
index 83a3028..e9b687a 100644
--- a/include/camera/ProCamera.h
+++ b/include/camera/ProCamera.h
@@ -265,7 +265,7 @@
         }
 
     protected:
-        virtual void onFrameAvailable() {
+        virtual void onFrameAvailable(const BufferItem& /* item */) {
             sp<ProCamera> c = mCamera.promote();
             if (c.get() != NULL) {
                 c->onFrameAvailable(mStreamId);
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 4932d40..583695d 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -133,10 +133,11 @@
      *
      * Returned value
      *   *descriptor updated with descriptors of pre processings enabled by default
-     *   *count      number of descriptors returned if returned status is N_ERROR.
+     *   *count      number of descriptors returned if returned status is NO_ERROR.
      *               total number of pre processing enabled by default if returned status is
      *               NO_MEMORY. This happens if the count passed as input is less than the number
-     *               of descriptors to return
+     *               of descriptors to return.
+     *               *count is limited to kMaxPreProcessing on return.
      */
     static status_t queryDefaultPreProcessing(int audioSession,
                                               effect_descriptor_t *descriptors,
@@ -391,6 +392,10 @@
       */
      static status_t guidToString(const effect_uuid_t *guid, char *str, size_t maxLen);
 
+     // kMaxPreProcessing is a reasonable value for the maximum number of preprocessing effects
+     // that can be applied simultaneously.
+     static const uint32_t kMaxPreProcessing = 10;
+
 protected:
      bool                    mEnabled;           // enable state
      int32_t                 mSessionId;         // audio session ID
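
A caller-side sketch of the documented contract, assuming the surrounding AOSP headers and omitting error handling; the free-function name is illustrative:

    #include <media/AudioEffect.h>

    using namespace android;

    status_t listDefaultPreProcessing(int audioSession) {
        effect_descriptor_t descriptors[AudioEffect::kMaxPreProcessing];
        uint32_t count = AudioEffect::kMaxPreProcessing;
        status_t status = AudioEffect::queryDefaultPreProcessing(
                audioSession, descriptors, &count);
        if (status == NO_ERROR) {
            // "count" descriptors were written into the array.
        } else if (status == NO_MEMORY) {
            // More effects are enabled by default than were requested; "count"
            // reports the total, and is now capped at kMaxPreProcessing.
        }
        return status;
    }
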
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index f8c0198..6a0f2a6 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -116,8 +116,6 @@
     static status_t getLatency(audio_io_handle_t output,
                                uint32_t* latency);
 
-    static bool routedToA2dpOutput(audio_stream_type_t streamType);
-
     // return status NO_ERROR implies *buffSize > 0
     static status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
         audio_channel_mask_t channelMask, size_t* buffSize);
@@ -377,7 +375,10 @@
     friend class AudioFlingerClient;
     friend class AudioPolicyServiceClient;
 
-    static Mutex gLock;
+    static Mutex gLock;     // protects all members except gAudioPolicyService,
+                            // gAudioPolicyServiceClient, and gAudioPortCallback
+    static Mutex gLockAPS;  // protects gAudioPolicyService and gAudioPolicyServiceClient
+    static Mutex gLockAPC;  // protects gAudioPortCallback
     static sp<IAudioFlinger> gAudioFlinger;
     static audio_error_callback gAudioErrorCallback;
 
diff --git a/include/media/ICrypto.h b/include/media/ICrypto.h
index 9dcb8d9..07742ca 100644
--- a/include/media/ICrypto.h
+++ b/include/media/ICrypto.h
@@ -41,6 +41,8 @@
     virtual bool requiresSecureDecoderComponent(
             const char *mime) const = 0;
 
+    virtual void notifyResolution(uint32_t width, uint32_t height) = 0;
+
     virtual ssize_t decrypt(
             bool secure,
             const uint8_t key[16],
@@ -64,4 +66,3 @@
 }  // namespace android
 
 #endif // ANDROID_ICRYPTO_H_
-
diff --git a/include/media/IDrm.h b/include/media/IDrm.h
index 68de87a..affcbd7 100644
--- a/include/media/IDrm.h
+++ b/include/media/IDrm.h
@@ -73,8 +73,10 @@
     virtual status_t unprovisionDevice() = 0;
 
     virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops) = 0;
+    virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) = 0;
 
     virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease) = 0;
+    virtual status_t releaseAllSecureStops() = 0;
 
     virtual status_t getPropertyString(String8 const &name, String8 &value) const = 0;
     virtual status_t getPropertyByteArray(String8 const &name,
@@ -137,4 +139,3 @@
 }  // namespace android
 
 #endif // ANDROID_IDRM_H_
-
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index cf18a45..c412299 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -208,8 +208,15 @@
 
     void        sendEvent(int msg, int ext1=0, int ext2=0,
                           const Parcel *obj=NULL) {
-        Mutex::Autolock autoLock(mNotifyLock);
-        if (mNotify) mNotify(mCookie, msg, ext1, ext2, obj);
+        notify_callback_f notifyCB;
+        void* cookie;
+        {
+            Mutex::Autolock autoLock(mNotifyLock);
+            notifyCB = mNotify;
+            cookie = mCookie;
+        }
+
+        if (notifyCB) notifyCB(cookie, msg, ext1, ext2, obj);
     }
 
     virtual status_t dump(int fd, const Vector<String16> &args) const {
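
The sendEvent() change above snapshots the callback and cookie under mNotifyLock and then invokes the callback with no lock held; a common reason for this idiom is that a callback which blocks or re-enters the object cannot then deadlock on the notify lock. A standard-library sketch of the same idiom, with illustrative names:

    #include <mutex>

    typedef void (*notify_callback_f)(void* cookie, int msg, int ext1, int ext2);

    class Notifier {
    public:
        void setNotify(notify_callback_f cb, void* cookie) {
            std::lock_guard<std::mutex> lock(mLock);
            mNotify = cb;
            mCookie = cookie;
        }

        void sendEvent(int msg, int ext1 = 0, int ext2 = 0) {
            notify_callback_f cb;
            void* cookie;
            {
                std::lock_guard<std::mutex> lock(mLock);
                cb = mNotify;
                cookie = mCookie;
            }
            if (cb) cb(cookie, msg, ext1, ext2);  // invoked with no locks held
        }

    private:
        std::mutex mLock;
        notify_callback_f mNotify = nullptr;
        void* mCookie = nullptr;
    };
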
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index d77ddaf..fcccc6d 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -120,6 +120,7 @@
         kWhatSetParameters           = 'setP',
         kWhatSubmitOutputMetaDataBufferIfEOS = 'subm',
         kWhatOMXDied                 = 'OMXd',
+        kWhatReleaseCodecInstance    = 'relC',
     };
 
     enum {
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index bca78b9..54a4e8b 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -205,6 +205,7 @@
         kFlagIsEncoder                  = 256,
         kFlagGatherCodecSpecificData    = 512,
         kFlagIsAsync                    = 1024,
+        kFlagIsComponentAllocated       = 2048,
     };
 
     struct BufferInfo {
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index ffe4f4c..2177c00 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -126,7 +126,7 @@
     // Implementation of the BufferQueue::ConsumerListener interface.  These
     // calls are used to notify the Surface of asynchronous events in the
     // BufferQueue.
-    virtual void onFrameAvailable();
+    virtual void onFrameAvailable(const BufferItem& item);
 
     // Used as a hook to BufferQueue::disconnect()
     // This is called by the client side when it is done
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index dda3657..f5c3383 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -32,6 +32,8 @@
 
 // client singleton for AudioFlinger binder interface
 Mutex AudioSystem::gLock;
+Mutex AudioSystem::gLockAPS;
+Mutex AudioSystem::gLockAPC;
 sp<IAudioFlinger> AudioSystem::gAudioFlinger;
 sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
 audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
@@ -70,9 +72,9 @@
         }
         binder->linkToDeath(gAudioFlingerClient);
         gAudioFlinger = interface_cast<IAudioFlinger>(binder);
+        LOG_ALWAYS_FATAL_IF(gAudioFlinger == 0);
         gAudioFlinger->registerClient(gAudioFlingerClient);
     }
-    ALOGE_IF(gAudioFlinger==0, "no AudioFlinger!?");
 
     return gAudioFlinger;
 }
@@ -543,22 +545,8 @@
     gAudioErrorCallback = cb;
 }
 
-
-bool AudioSystem::routedToA2dpOutput(audio_stream_type_t streamType)
-{
-    switch (streamType) {
-    case AUDIO_STREAM_MUSIC:
-    case AUDIO_STREAM_VOICE_CALL:
-    case AUDIO_STREAM_BLUETOOTH_SCO:
-    case AUDIO_STREAM_SYSTEM:
-        return true;
-    default:
-        return false;
-    }
-}
-
-
 // client singleton for AudioPolicyService binder interface
+// protected by gLockAPS
 sp<IAudioPolicyService> AudioSystem::gAudioPolicyService;
 sp<AudioSystem::AudioPolicyServiceClient> AudioSystem::gAudioPolicyServiceClient;
 
@@ -566,7 +554,7 @@
 // establish binder interface to AudioPolicy service
 const sp<IAudioPolicyService>& AudioSystem::get_audio_policy_service()
 {
-    gLock.lock();
+    Mutex::Autolock _l(gLockAPS);
     if (gAudioPolicyService == 0) {
         sp<IServiceManager> sm = defaultServiceManager();
         sp<IBinder> binder;
@@ -582,15 +570,10 @@
         }
         binder->linkToDeath(gAudioPolicyServiceClient);
         gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
-        gLock.unlock();
-        // Registering the client takes the AudioPolicyService lock.
-        // Don't hold the AudioSystem lock at the same time.
+        LOG_ALWAYS_FATAL_IF(gAudioPolicyService == 0);
         gAudioPolicyService->registerClient(gAudioPolicyServiceClient);
-    } else {
-        // There exists a benign race condition where gAudioPolicyService
-        // is set, but gAudioPolicyServiceClient is not yet registered.
-        gLock.unlock();
     }
+
     return gAudioPolicyService;
 }
 
@@ -856,9 +839,18 @@
 
 void AudioSystem::clearAudioConfigCache()
 {
-    Mutex::Autolock _l(gLock);
+    // called by restoreTrack_l(), which needs new IAudioFlinger and IAudioPolicyService instances
     ALOGV("clearAudioConfigCache()");
-    gOutputs.clear();
+    {
+        Mutex::Autolock _l(gLock);
+        gOutputs.clear();
+        gAudioFlinger.clear();
+    }
+    {
+        Mutex::Autolock _l(gLockAPS);
+        gAudioPolicyService.clear();
+    }
+    // Do not clear gAudioPortCallback
 }
 
 bool AudioSystem::isOffloadSupported(const audio_offload_info_t& info)
@@ -920,7 +912,7 @@
 
 void AudioSystem::setAudioPortCallback(sp<AudioPortCallback> callBack)
 {
-    Mutex::Autolock _l(gLock);
+    Mutex::Autolock _l(gLockAPC);
     gAudioPortCallback = callBack;
 }
 
@@ -952,18 +944,23 @@
 
 void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused)
 {
-    Mutex::Autolock _l(gLock);
-    if (gAudioPortCallback != 0) {
-        gAudioPortCallback->onServiceDied();
+    {
+        Mutex::Autolock _l(gLockAPC);
+        if (gAudioPortCallback != 0) {
+            gAudioPortCallback->onServiceDied();
+        }
     }
-    AudioSystem::gAudioPolicyService.clear();
+    {
+        Mutex::Autolock _l(gLockAPS);
+        AudioSystem::gAudioPolicyService.clear();
+    }
 
     ALOGW("AudioPolicyService server died!");
 }
 
 void AudioSystem::AudioPolicyServiceClient::onAudioPortListUpdate()
 {
-    Mutex::Autolock _l(gLock);
+    Mutex::Autolock _l(gLockAPC);
     if (gAudioPortCallback != 0) {
         gAudioPortCallback->onAudioPortListUpdate();
     }
@@ -971,7 +968,7 @@
 
 void AudioSystem::AudioPolicyServiceClient::onAudioPatchListUpdate()
 {
-    Mutex::Autolock _l(gLock);
+    Mutex::Autolock _l(gLockAPC);
     if (gAudioPortCallback != 0) {
         gAudioPortCallback->onAudioPatchListUpdate();
     }
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 0a89fbb..cd493f6 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -278,7 +278,9 @@
     }
 
     // handle default values first.
-    if (streamType == AUDIO_STREAM_DEFAULT) {
+    // TODO once AudioPolicyManager fully supports audio_attributes_t,
+    //   remove stream "text-to-speech" redirect
+    if ((streamType == AUDIO_STREAM_DEFAULT) || (streamType == AUDIO_STREAM_TTS)) {
         streamType = AUDIO_STREAM_MUSIC;
     }
 
@@ -1826,7 +1828,7 @@
     status_t result;
 
     // refresh the audio configuration cache in this process to make sure we get new
-    // output parameters in createTrack_l()
+    // output parameters and new IAudioFlinger in createTrack_l()
     AudioSystem::clearAudioConfigCache();
 
     if (isOffloadedOrDirect_l()) {
@@ -2124,17 +2126,30 @@
         mStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
         return;
     }
+    // TODO once AudioPolicyManager fully supports audio_attributes_t,
+    //   remove stream remap, the flag will be enough
+    if ((aa.flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
+        mStreamType = AUDIO_STREAM_TTS;
+        return;
+    }
 
     // usage to stream type mapping
     switch (aa.usage) {
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY: {
         // TODO once AudioPolicyManager fully supports audio_attributes_t,
-        //   remove stream change based on phone state
-        if (AudioSystem::getPhoneState() == AUDIO_MODE_RINGTONE) {
+        //   remove stream change based on stream activity
+        bool active;
+        status_t status = AudioSystem::isStreamActive(AUDIO_STREAM_RING, &active, 0);
+        if (status == NO_ERROR && active == true) {
             mStreamType = AUDIO_STREAM_RING;
             break;
         }
-        /// FALL THROUGH
+        status = AudioSystem::isStreamActive(AUDIO_STREAM_ALARM, &active, 0);
+        if (status == NO_ERROR && active == true) {
+            mStreamType = AUDIO_STREAM_ALARM;
+            break;
+        }
+    }    /// FALL THROUGH
     case AUDIO_USAGE_MEDIA:
     case AUDIO_USAGE_GAME:
     case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
@@ -2174,7 +2189,7 @@
 
 bool AudioTrack::isValidAttributes(const audio_attributes_t *paa) {
     // has flags that map to a strategy?
-    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO)) != 0) {
+    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO | AUDIO_FLAG_BEACON)) != 0) {
         return true;
     }
 
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index eec025e..561cb24 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -348,7 +348,13 @@
 
 void AudioTrackClientProxy::flush()
 {
-    mCblk->u.mStreaming.mFlush++;
+    // This works for mFrameCountP2 <= 2^30
+    size_t increment = mFrameCountP2 << 1;
+    size_t mask = increment - 1;
+    audio_track_cblk_t* cblk = mCblk;
+    int32_t newFlush = (cblk->u.mStreaming.mRear & mask) |
+                        ((cblk->u.mStreaming.mFlush & ~mask) + increment);
+    android_atomic_release_store(newFlush, &cblk->u.mStreaming.mFlush);
 }
 
 bool AudioTrackClientProxy::clearStreamEndDone() {
@@ -536,17 +542,27 @@
         rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
         front = cblk->u.mStreaming.mFront;
         if (flush != mFlush) {
-            mFlush = flush;
             // effectively obtain then release whatever is in the buffer
-            android_atomic_release_store(rear, &cblk->u.mStreaming.mFront);
-            if (front != rear) {
+            size_t mask = (mFrameCountP2 << 1) - 1;
+            int32_t newFront = (front & ~mask) | (flush & mask);
+            ssize_t filled = rear - newFront;
+            // Rather than shutting down on a corrupt flush, just treat it as a full flush
+            if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+                ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, filled %d=%#x",
+                        mFlush, flush, front, rear, mask, newFront, filled, filled);
+                newFront = rear;
+            }
+            mFlush = flush;
+            android_atomic_release_store(newFront, &cblk->u.mStreaming.mFront);
+            // There is no danger from a false positive, so err on the side of caution
+            if (true /*front != newFront*/) {
                 int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
                 if (!(old & CBLK_FUTEX_WAKE)) {
                     (void) syscall(__NR_futex, &cblk->mFutex,
                             mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
                 }
             }
-            front = rear;
+            front = newFront;
         }
     } else {
         front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
@@ -668,6 +684,7 @@
 
     int32_t flush = cblk->u.mStreaming.mFlush;
     if (flush != mFlush) {
+        // FIXME should return an accurate value, but over-estimate is better than under-estimate
         return mFrameCount;
     }
     // the acquire might not be necessary since not doing a subsequent read
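
The new flush handshake above packs two things into the shared mFlush word: the low bits carry the client's rear index modulo 2 * mFrameCountP2, and the remaining high bits act as a sequence counter so the server can tell a fresh flush from a stale value and sanity-check the resulting fill level. A standalone sketch of that arithmetic with illustrative numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t frameCountP2 = 1024;          // power-of-two frame count
        const uint32_t increment = frameCountP2 << 1;
        const uint32_t mask = increment - 1;

        uint32_t flushWord = 0;                      // shared mFlush value
        uint32_t rear = 1536;                        // client rear index at flush time

        // Client side (flush): encode the rear position and bump the sequence.
        flushWord = (rear & mask) | ((flushWord & ~mask) + increment);

        // Server side: rebuild the flush point relative to its own front index.
        uint32_t front = 700;
        uint32_t newFront = (front & ~mask) | (flushWord & mask);

        std::printf("flushWord=%#x newFront=%u filled=%u\n",
                    flushWord, newFront, rear - newFront);
        return 0;
    }
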
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 256cb3f..89178f1 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -23,6 +23,7 @@
 
 #include <binder/Parcel.h>
 
+#include <media/AudioEffect.h>
 #include <media/IAudioPolicyService.h>
 
 #include <system/audio.h>
@@ -704,8 +705,8 @@
 
         case GET_OUTPUT_FOR_ATTR: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_attributes_t *attr = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
-            data.read(attr, sizeof(audio_attributes_t));
+            audio_attributes_t attr;
+            data.read(&attr, sizeof(audio_attributes_t));
             uint32_t samplingRate = data.readInt32();
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
@@ -716,7 +717,7 @@
             if (hasOffloadInfo) {
                 data.read(&offloadInfo, sizeof(audio_offload_info_t));
             }
-            audio_io_handle_t output = getOutputForAttr(attr,
+            audio_io_handle_t output = getOutputForAttr(&attr,
                     samplingRate,
                     format,
                     channelMask,
@@ -916,16 +917,18 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             int audioSession = data.readInt32();
             uint32_t count = data.readInt32();
+            if (count > AudioEffect::kMaxPreProcessing) {
+                count = AudioEffect::kMaxPreProcessing;
+            }
             uint32_t retCount = count;
-            effect_descriptor_t *descriptors =
-                    (effect_descriptor_t *)new char[count * sizeof(effect_descriptor_t)];
+            effect_descriptor_t *descriptors = new effect_descriptor_t[count];
             status_t status = queryDefaultPreProcessing(audioSession, descriptors, &retCount);
             reply->writeInt32(status);
             if (status != NO_ERROR && status != NO_MEMORY) {
                 retCount = 0;
             }
             reply->writeInt32(retCount);
-            if (retCount) {
+            if (retCount != 0) {
                 if (retCount < count) {
                     count = retCount;
                 }
diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp
index 0d5f990..c26c5bf 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/media/libmedia/ICrypto.cpp
@@ -33,6 +33,7 @@
     DESTROY_PLUGIN,
     REQUIRES_SECURE_COMPONENT,
     DECRYPT,
+    NOTIFY_RESOLUTION,
 };
 
 struct BpCrypto : public BpInterface<ICrypto> {
@@ -149,6 +150,15 @@
         return result;
     }
 
+    virtual void notifyResolution(
+        uint32_t width, uint32_t height) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
+        data.writeInt32(width);
+        data.writeInt32(height);
+        remote()->transact(NOTIFY_RESOLUTION, data, &reply);
+    }
+
 private:
     DISALLOW_EVIL_CONSTRUCTORS(BpCrypto);
 };
@@ -290,10 +300,20 @@
             return OK;
         }
 
+        case NOTIFY_RESOLUTION:
+        {
+            CHECK_INTERFACE(ICrypto, data, reply);
+
+            int32_t width = data.readInt32();
+            int32_t height = data.readInt32();
+            notifyResolution(width, height);
+
+            return OK;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
 }
 
 }  // namespace android
-
diff --git a/media/libmedia/IDrm.cpp b/media/libmedia/IDrm.cpp
index 1904839..7e74de9 100644
--- a/media/libmedia/IDrm.cpp
+++ b/media/libmedia/IDrm.cpp
@@ -54,7 +54,9 @@
     SIGN_RSA,
     VERIFY,
     SET_LISTENER,
-    UNPROVISION_DEVICE
+    UNPROVISION_DEVICE,
+    GET_SECURE_STOP,
+    RELEASE_ALL_SECURE_STOPS
 };
 
 struct BpDrm : public BpInterface<IDrm> {
@@ -255,6 +257,17 @@
         return reply.readInt32();
     }
 
+    virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
+
+        writeVector(data, ssid);
+        remote()->transact(GET_SECURE_STOP, data, &reply);
+
+        readVector(reply, secureStop);
+        return reply.readInt32();
+    }
+
     virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease) {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
@@ -265,6 +278,15 @@
         return reply.readInt32();
     }
 
+    virtual status_t releaseAllSecureStops() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
+
+        remote()->transact(RELEASE_ALL_SECURE_STOPS, data, &reply);
+
+        return reply.readInt32();
+    }
+
     virtual status_t getPropertyString(String8 const &name, String8 &value) const {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
@@ -655,6 +677,17 @@
             return OK;
         }
 
+        case GET_SECURE_STOP:
+        {
+            CHECK_INTERFACE(IDrm, data, reply);
+            Vector<uint8_t> ssid, secureStop;
+            readVector(data, ssid);
+            status_t result = getSecureStop(ssid, secureStop);
+            writeVector(reply, secureStop);
+            reply->writeInt32(result);
+            return OK;
+        }
+
         case RELEASE_SECURE_STOPS:
         {
             CHECK_INTERFACE(IDrm, data, reply);
@@ -664,6 +697,13 @@
             return OK;
         }
 
+        case RELEASE_ALL_SECURE_STOPS:
+        {
+            CHECK_INTERFACE(IDrm, data, reply);
+            reply->writeInt32(releaseAllSecureStops());
+            return OK;
+        }
+
         case GET_PROPERTY_STRING:
         {
             CHECK_INTERFACE(IDrm, data, reply);
@@ -809,4 +849,3 @@
 }
 
 }  // namespace android
-
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index c146b8d..f91e3e4 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -52,6 +52,13 @@
 
 Visualizer::~Visualizer()
 {
+    ALOGV("Visualizer::~Visualizer()");
+    if (mCaptureThread != NULL) {
+        mCaptureThread->requestExitAndWait();
+        mCaptureThread.clear();
+    }
+    mCaptureCallBack = NULL;
+    mCaptureFlags = 0;
 }
 
 status_t Visualizer::setEnabled(bool enabled)
@@ -102,20 +109,18 @@
         return INVALID_OPERATION;
     }
 
-    sp<CaptureThread> t = mCaptureThread;
-    if (t != 0) {
-        t->mLock.lock();
+    if (mCaptureThread != 0) {
+        mCaptureLock.unlock();
+        mCaptureThread->requestExitAndWait();
+        mCaptureLock.lock();
     }
+
     mCaptureThread.clear();
     mCaptureCallBack = cbk;
     mCaptureCbkUser = user;
     mCaptureFlags = flags;
     mCaptureRate = rate;
 
-    if (t != 0) {
-        t->mLock.unlock();
-    }
-
     if (cbk != NULL) {
         mCaptureThread = new CaptureThread(*this, rate, ((flags & CAPTURE_CALL_JAVA) != 0));
     }
diff --git a/media/libmediaplayerservice/Crypto.cpp b/media/libmediaplayerservice/Crypto.cpp
index 62593b2..8ee7c0b 100644
--- a/media/libmediaplayerservice/Crypto.cpp
+++ b/media/libmediaplayerservice/Crypto.cpp
@@ -257,4 +257,12 @@
             errorDetailMsg);
 }
 
+void Crypto::notifyResolution(uint32_t width, uint32_t height) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck == OK && mPlugin != NULL) {
+        mPlugin->notifyResolution(width, height);
+    }
+}
+
 }  // namespace android
diff --git a/media/libmediaplayerservice/Crypto.h b/media/libmediaplayerservice/Crypto.h
index c44ae34..0037c2e 100644
--- a/media/libmediaplayerservice/Crypto.h
+++ b/media/libmediaplayerservice/Crypto.h
@@ -45,6 +45,8 @@
     virtual bool requiresSecureDecoderComponent(
             const char *mime) const;
 
+    virtual void notifyResolution(uint32_t width, uint32_t height);
+
     virtual ssize_t decrypt(
             bool secure,
             const uint8_t key[16],
diff --git a/media/libmediaplayerservice/Drm.cpp b/media/libmediaplayerservice/Drm.cpp
index d222316..2a8b2c6 100644
--- a/media/libmediaplayerservice/Drm.cpp
+++ b/media/libmediaplayerservice/Drm.cpp
@@ -449,6 +449,20 @@
     return mPlugin->getSecureStops(secureStops);
 }
 
+status_t Drm::getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mPlugin == NULL) {
+        return -EINVAL;
+    }
+
+    return mPlugin->getSecureStop(ssid, secureStop);
+}
+
 status_t Drm::releaseSecureStops(Vector<uint8_t> const &ssRelease) {
     Mutex::Autolock autoLock(mLock);
 
@@ -463,6 +477,20 @@
     return mPlugin->releaseSecureStops(ssRelease);
 }
 
+status_t Drm::releaseAllSecureStops() {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mPlugin == NULL) {
+        return -EINVAL;
+    }
+
+    return mPlugin->releaseAllSecureStops();
+}
+
 status_t Drm::getPropertyString(String8 const &name, String8 &value ) const {
     Mutex::Autolock autoLock(mLock);
 
diff --git a/media/libmediaplayerservice/Drm.h b/media/libmediaplayerservice/Drm.h
index 9e23e2e..0e1eb2c 100644
--- a/media/libmediaplayerservice/Drm.h
+++ b/media/libmediaplayerservice/Drm.h
@@ -78,8 +78,10 @@
     virtual status_t unprovisionDevice();
 
     virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops);
+    virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
 
     virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
+    virtual status_t releaseAllSecureStops();
 
     virtual status_t getPropertyString(String8 const &name, String8 &value ) const;
     virtual status_t getPropertyByteArray(String8 const &name,
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 8eb1269..c120898 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -2159,7 +2159,6 @@
     {
     case MEDIA_ERROR:
         ALOGE("Error %d, %d occurred", ext1, ext2);
-        p->mError = ext1;
         break;
     case MEDIA_PREPARED:
         ALOGV("prepared");
@@ -2174,6 +2173,9 @@
 
     // wake up thread
     Mutex::Autolock lock(p->mLock);
+    if (msg == MEDIA_ERROR) {
+        p->mError = ext1;
+    }
     p->mCommandComplete = true;
     p->mSignal.signal();
 }
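
The reordering above ensures mError is written under p->mLock before mCommandComplete is set and the condition is signalled, so the waiting thread cannot observe completion without also seeing the error code. A minimal standard-library sketch of that rule, with illustrative names:

    #include <condition_variable>
    #include <mutex>

    struct Waiter {
        std::mutex lock;
        std::condition_variable cond;
        bool complete = false;
        int error = 0;

        void notify(int err) {
            std::lock_guard<std::mutex> guard(lock);
            error = err;          // written under the lock, like p->mError above
            complete = true;
            cond.notify_one();
        }

        int wait() {
            std::unique_lock<std::mutex> guard(lock);
            cond.wait(guard, [this] { return complete; });
            return error;
        }
    };
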
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index cadd691..3d093fa 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -111,7 +111,7 @@
 status_t StagefrightRecorder::setAudioSource(audio_source_t as) {
     ALOGV("setAudioSource: %d", as);
     if (as < AUDIO_SOURCE_DEFAULT ||
-        as >= AUDIO_SOURCE_CNT) {
+        (as >= AUDIO_SOURCE_CNT && as != AUDIO_SOURCE_FM_TUNER)) {
         ALOGE("Invalid audio source: %d", as);
         return BAD_VALUE;
     }
@@ -981,7 +981,7 @@
 }
 
 status_t StagefrightRecorder::setupRawAudioRecording() {
-    if (mAudioSource >= AUDIO_SOURCE_CNT) {
+    if (mAudioSource >= AUDIO_SOURCE_CNT && mAudioSource != AUDIO_SOURCE_FM_TUNER) {
         ALOGE("Invalid audio source: %d", mAudioSource);
         return BAD_VALUE;
     }
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 6859a1a..d446cec 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -45,6 +45,10 @@
         bool uidValid,
         uid_t uid)
     : Source(notify),
+      mAudioTimeUs(0),
+      mAudioLastDequeueTimeUs(0),
+      mVideoTimeUs(0),
+      mVideoLastDequeueTimeUs(0),
       mFetchSubtitleDataGeneration(0),
       mFetchTimedTextDataGeneration(0),
       mDurationUs(0ll),
@@ -62,8 +66,6 @@
 }
 
 void NuPlayer::GenericSource::resetDataSource() {
-    mAudioTimeUs = 0;
-    mVideoTimeUs = 0;
     mHTTPService.clear();
     mHttpSource.clear();
     mUri.clear();
@@ -644,17 +646,13 @@
           track->mSource->start();
           track->mIndex = trackIndex;
 
-          status_t avail;
-          if (!track->mPackets->hasBufferAvailable(&avail)) {
-              // sync from other source
-              TRESPASS();
-              break;
-          }
-
           int64_t timeUs, actualTimeUs;
           const bool formatChange = true;
-          sp<AMessage> latestMeta = track->mPackets->getLatestEnqueuedMeta();
-          CHECK(latestMeta != NULL && latestMeta->findInt64("timeUs", &timeUs));
+          if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+              timeUs = mAudioLastDequeueTimeUs;
+          } else {
+              timeUs = mVideoLastDequeueTimeUs;
+          }
           readBuffer(trackType, timeUs, &actualTimeUs, formatChange);
           readBuffer(counterpartType, -1, NULL, formatChange);
           ALOGV("timeUs %lld actualTimeUs %lld", timeUs, actualTimeUs);
@@ -866,6 +864,11 @@
     int64_t timeUs;
     status_t eosResult; // ignored
     CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
+    if (audio) {
+        mAudioLastDequeueTimeUs = timeUs;
+    } else {
+        mVideoLastDequeueTimeUs = timeUs;
+    }
 
     if (mSubtitleTrack.mSource != NULL
             && !mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
@@ -1132,10 +1135,12 @@
         readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, &actualTimeUs);
 
         seekTimeUs = actualTimeUs;
+        mVideoLastDequeueTimeUs = seekTimeUs;
     }
 
     if (mAudioTrack.mSource != NULL) {
         readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs);
+        mAudioLastDequeueTimeUs = seekTimeUs;
     }
 
     setDrmPlaybackStatusIfNeeded(Playback::START, seekTimeUs / 1000);
@@ -1311,11 +1316,9 @@
             if ((seeking || formatChange)
                     && (trackType == MEDIA_TRACK_TYPE_AUDIO
                     || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
-                ATSParser::DiscontinuityType type = formatChange
-                        ? (seeking
-                                ? ATSParser::DISCONTINUITY_FORMATCHANGE
-                                : ATSParser::DISCONTINUITY_NONE)
-                        : ATSParser::DISCONTINUITY_SEEK;
+                ATSParser::DiscontinuityType type = (formatChange && seeking)
+                        ? ATSParser::DISCONTINUITY_FORMATCHANGE
+                        : ATSParser::DISCONTINUITY_NONE;
                 track->mPackets->queueDiscontinuity( type, NULL, true /* discard */);
             }
 
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index f8601ea..7a03df0 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -106,8 +106,10 @@
 
     Track mAudioTrack;
     int64_t mAudioTimeUs;
+    int64_t mAudioLastDequeueTimeUs;
     Track mVideoTrack;
     int64_t mVideoTimeUs;
+    int64_t mVideoLastDequeueTimeUs;
     Track mSubtitleTrack;
     Track mTimedTextTrack;
 
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index a003c81..02e9caf 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -98,6 +98,10 @@
 }
 
 sp<AMessage> NuPlayer::HTTPLiveSource::getFormat(bool audio) {
+    if (mLiveSession == NULL) {
+        return NULL;
+    }
+
     sp<AMessage> format;
     status_t err = mLiveSession->getStreamFormat(
             audio ? LiveSession::STREAMTYPE_AUDIO
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index a63a940..47bd989 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -95,21 +95,21 @@
     DISALLOW_EVIL_CONSTRUCTORS(SetSurfaceAction);
 };
 
-struct NuPlayer::ShutdownDecoderAction : public Action {
-    ShutdownDecoderAction(bool audio, bool video)
+struct NuPlayer::FlushDecoderAction : public Action {
+    FlushDecoderAction(FlushCommand audio, FlushCommand video)
         : mAudio(audio),
           mVideo(video) {
     }
 
     virtual void execute(NuPlayer *player) {
-        player->performDecoderShutdown(mAudio, mVideo);
+        player->performDecoderFlush(mAudio, mVideo);
     }
 
 private:
-    bool mAudio;
-    bool mVideo;
+    FlushCommand mAudio;
+    FlushCommand mVideo;
 
-    DISALLOW_EVIL_CONSTRUCTORS(ShutdownDecoderAction);
+    DISALLOW_EVIL_CONSTRUCTORS(FlushDecoderAction);
 };
 
 struct NuPlayer::PostMessageAction : public Action {
@@ -306,10 +306,6 @@
     (new AMessage(kWhatPause, id()))->post();
 }
 
-void NuPlayer::resume() {
-    (new AMessage(kWhatResume, id()))->post();
-}
-
 void NuPlayer::resetAsync() {
     if (mSource != NULL) {
         // During a reset, the data source might be unresponsive already, we need to
@@ -526,19 +522,24 @@
         {
             ALOGV("kWhatSetVideoNativeWindow");
 
-            mDeferredActions.push_back(
-                    new ShutdownDecoderAction(
-                        false /* audio */, true /* video */));
-
             sp<RefBase> obj;
             CHECK(msg->findObject("native-window", &obj));
 
+            if (mSource->getFormat(false /* audio */) == NULL) {
+                performSetSurface(static_cast<NativeWindowWrapper *>(obj.get()));
+                break;
+            }
+
+            mDeferredActions.push_back(
+                    new FlushDecoderAction(FLUSH_CMD_FLUSH /* audio */,
+                                           FLUSH_CMD_SHUTDOWN /* video */));
+
             mDeferredActions.push_back(
                     new SetSurfaceAction(
                         static_cast<NativeWindowWrapper *>(obj.get())));
 
             if (obj != NULL) {
-                if (mStarted && mSource->getFormat(false /* audio */) != NULL) {
+                if (mStarted) {
                     // Issue a seek to refresh the video screen only if started otherwise
                     // the extractor may not yet be started and will assert.
                     // If the video decoder is not set (perhaps audio only in this case)
@@ -574,69 +575,11 @@
         case kWhatStart:
         {
             ALOGV("kWhatStart");
-
-            mVideoIsAVC = false;
-            mOffloadAudio = false;
-            mAudioEOS = false;
-            mVideoEOS = false;
-            mSkipRenderingAudioUntilMediaTimeUs = -1;
-            mSkipRenderingVideoUntilMediaTimeUs = -1;
-            mNumFramesTotal = 0;
-            mNumFramesDropped = 0;
-            mStarted = true;
-
-            /* instantiate decoders now for secure playback */
-            if (mSourceFlags & Source::FLAG_SECURE) {
-                if (mNativeWindow != NULL) {
-                    instantiateDecoder(false, &mVideoDecoder);
-                }
-
-                if (mAudioSink != NULL) {
-                    instantiateDecoder(true, &mAudioDecoder);
-                }
+            if (mStarted) {
+                onResume();
+            } else {
+                onStart();
             }
-
-            mSource->start();
-
-            uint32_t flags = 0;
-
-            if (mSource->isRealTime()) {
-                flags |= Renderer::FLAG_REAL_TIME;
-            }
-
-            sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
-            audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
-            if (mAudioSink != NULL) {
-                streamType = mAudioSink->getAudioStreamType();
-            }
-
-            sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
-
-            mOffloadAudio =
-                canOffloadStream(audioMeta, (videoFormat != NULL),
-                                 true /* is_streaming */, streamType);
-            if (mOffloadAudio) {
-                flags |= Renderer::FLAG_OFFLOAD_AUDIO;
-            }
-
-            sp<AMessage> notify = new AMessage(kWhatRendererNotify, id());
-            ++mRendererGeneration;
-            notify->setInt32("generation", mRendererGeneration);
-            mRenderer = new Renderer(mAudioSink, notify, flags);
-
-            mRendererLooper = new ALooper;
-            mRendererLooper->setName("NuPlayerRenderer");
-            mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
-            mRendererLooper->registerHandler(mRenderer);
-
-            sp<MetaData> meta = getFileMeta();
-            int32_t rate;
-            if (meta != NULL
-                    && meta->findInt32(kKeyFrameRate, &rate) && rate > 0) {
-                mRenderer->setVideoFrameRate(rate);
-            }
-
-            postScanSources();
             break;
         }
 
@@ -811,7 +754,9 @@
                 switch (*flushing) {
                     case NONE:
                         mDeferredActions.push_back(
-                                new ShutdownDecoderAction(audio, !audio /* video */));
+                                new FlushDecoderAction(
+                                    audio ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE,
+                                    audio ? FLUSH_CMD_NONE : FLUSH_CMD_SHUTDOWN));
                         processDeferredActions();
                         break;
                     case FLUSHING_DECODER:
@@ -934,8 +879,9 @@
             ALOGV("kWhatReset");
 
             mDeferredActions.push_back(
-                    new ShutdownDecoderAction(
-                        true /* audio */, true /* video */));
+                    new FlushDecoderAction(
+                        FLUSH_CMD_SHUTDOWN /* audio */,
+                        FLUSH_CMD_SHUTDOWN /* video */));
 
             mDeferredActions.push_back(
                     new SimpleAction(&NuPlayer::performReset));
@@ -955,7 +901,8 @@
                     seekTimeUs, needNotify);
 
             mDeferredActions.push_back(
-                    new SimpleAction(&NuPlayer::performDecoderFlush));
+                    new FlushDecoderAction(FLUSH_CMD_FLUSH /* audio */,
+                                           FLUSH_CMD_FLUSH /* video */));
 
             mDeferredActions.push_back(
                     new SeekAction(seekTimeUs, needNotify));
@@ -979,26 +926,6 @@
             break;
         }
 
-        case kWhatResume:
-        {
-            if (mSource != NULL) {
-                mSource->resume();
-            } else {
-                ALOGW("resume called when source is gone or not set");
-            }
-            // |mAudioDecoder| may have been released due to the pause timeout, so re-create it if
-            // needed.
-            if (audioDecoderStillNeeded() && mAudioDecoder == NULL) {
-                instantiateDecoder(true /* audio */, &mAudioDecoder);
-            }
-            if (mRenderer != NULL) {
-                mRenderer->resume();
-            } else {
-                ALOGW("resume called when renderer is gone or not set");
-            }
-            break;
-        }
-
         case kWhatSourceNotify:
         {
             onSourceNotify(msg);
@@ -1017,6 +944,89 @@
     }
 }
 
+void NuPlayer::onResume() {
+    if (mSource != NULL) {
+        mSource->resume();
+    } else {
+        ALOGW("resume called when source is gone or not set");
+    }
+    // |mAudioDecoder| may have been released due to the pause timeout, so re-create it if
+    // needed.
+    if (audioDecoderStillNeeded() && mAudioDecoder == NULL) {
+        instantiateDecoder(true /* audio */, &mAudioDecoder);
+    }
+    if (mRenderer != NULL) {
+        mRenderer->resume();
+    } else {
+        ALOGW("resume called when renderer is gone or not set");
+    }
+}
+
+void NuPlayer::onStart() {
+    mVideoIsAVC = false;
+    mOffloadAudio = false;
+    mAudioEOS = false;
+    mVideoEOS = false;
+    mSkipRenderingAudioUntilMediaTimeUs = -1;
+    mSkipRenderingVideoUntilMediaTimeUs = -1;
+    mNumFramesTotal = 0;
+    mNumFramesDropped = 0;
+    mStarted = true;
+
+    /* instantiate decoders now for secure playback */
+    if (mSourceFlags & Source::FLAG_SECURE) {
+        if (mNativeWindow != NULL) {
+            instantiateDecoder(false, &mVideoDecoder);
+        }
+
+        if (mAudioSink != NULL) {
+            instantiateDecoder(true, &mAudioDecoder);
+        }
+    }
+
+    mSource->start();
+
+    uint32_t flags = 0;
+
+    if (mSource->isRealTime()) {
+        flags |= Renderer::FLAG_REAL_TIME;
+    }
+
+    sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+    audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
+    if (mAudioSink != NULL) {
+        streamType = mAudioSink->getAudioStreamType();
+    }
+
+    sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
+
+    mOffloadAudio =
+        canOffloadStream(audioMeta, (videoFormat != NULL),
+                         true /* is_streaming */, streamType);
+    if (mOffloadAudio) {
+        flags |= Renderer::FLAG_OFFLOAD_AUDIO;
+    }
+
+    sp<AMessage> notify = new AMessage(kWhatRendererNotify, id());
+    ++mRendererGeneration;
+    notify->setInt32("generation", mRendererGeneration);
+    mRenderer = new Renderer(mAudioSink, notify, flags);
+
+    mRendererLooper = new ALooper;
+    mRendererLooper->setName("NuPlayerRenderer");
+    mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+    mRendererLooper->registerHandler(mRenderer);
+
+    sp<MetaData> meta = getFileMeta();
+    int32_t rate;
+    if (meta != NULL
+            && meta->findInt32(kKeyFrameRate, &rate) && rate > 0) {
+        mRenderer->setVideoFrameRate(rate);
+    }
+
+    postScanSources();
+}
+
 bool NuPlayer::audioDecoderStillNeeded() {
     // Audio decoder is no longer needed if it's in shut/shutting down status.
     return ((mFlushingAudio != SHUT_DOWN) && (mFlushingAudio != SHUTTING_DOWN_DECODER));
@@ -1119,7 +1129,7 @@
     // Current code will just make that we select deep buffer
     // with video which should not be a problem as it should
     // not prevent from keeping A/V sync.
-    if (hasVideo &&
+    if (!hasVideo &&
             mSource->getDuration(&durationUs) == OK &&
             durationUs
                 > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) {
@@ -1624,7 +1634,9 @@
     ALOGI("queueDecoderShutdown audio=%d, video=%d", audio, video);
 
     mDeferredActions.push_back(
-            new ShutdownDecoderAction(audio, video));
+            new FlushDecoderAction(
+                audio ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE,
+                video ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE));
 
     mDeferredActions.push_back(
             new SimpleAction(&NuPlayer::performScanSources));
@@ -1769,40 +1781,22 @@
     // everything's flushed, continue playback.
 }
 
-void NuPlayer::performDecoderFlush() {
-    ALOGV("performDecoderFlush");
+void NuPlayer::performDecoderFlush(FlushCommand audio, FlushCommand video) {
+    ALOGV("performDecoderFlush audio=%d, video=%d", audio, video);
 
-    if (mAudioDecoder == NULL && mVideoDecoder == NULL) {
+    if ((audio == FLUSH_CMD_NONE || mAudioDecoder == NULL)
+            && (video == FLUSH_CMD_NONE || mVideoDecoder == NULL)) {
         return;
     }
 
     mTimeDiscontinuityPending = true;
 
-    if (mAudioDecoder != NULL) {
-        flushDecoder(true /* audio */, false /* needShutdown */);
+    if (audio != FLUSH_CMD_NONE && mAudioDecoder != NULL) {
+        flushDecoder(true /* audio */, (audio == FLUSH_CMD_SHUTDOWN));
     }
 
-    if (mVideoDecoder != NULL) {
-        flushDecoder(false /* audio */, false /* needShutdown */);
-    }
-}
-
-void NuPlayer::performDecoderShutdown(bool audio, bool video) {
-    ALOGV("performDecoderShutdown audio=%d, video=%d", audio, video);
-
-    if ((!audio || mAudioDecoder == NULL)
-            && (!video || mVideoDecoder == NULL)) {
-        return;
-    }
-
-    mTimeDiscontinuityPending = true;
-
-    if (audio && mAudioDecoder != NULL) {
-        flushDecoder(true /* audio */, true /* needShutdown */);
-    }
-
-    if (video && mVideoDecoder != NULL) {
-        flushDecoder(false /* audio */, true /* needShutdown */);
+    if (video != FLUSH_CMD_NONE && mVideoDecoder != NULL) {
+        flushDecoder(false /* audio */, (video == FLUSH_CMD_SHUTDOWN));
     }
 }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index d6120d2..121f7dd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -54,7 +54,6 @@
     void start();
 
     void pause();
-    void resume();
 
     // Will notify the driver through "notifyResetComplete" once finished.
     void resetAsync();
@@ -95,7 +94,7 @@
     struct Action;
     struct SeekAction;
     struct SetSurfaceAction;
-    struct ShutdownDecoderAction;
+    struct FlushDecoderAction;
     struct PostMessageAction;
     struct SimpleAction;
 
@@ -160,6 +159,12 @@
         SHUT_DOWN,
     };
 
+    enum FlushCommand {
+        FLUSH_CMD_NONE,
+        FLUSH_CMD_FLUSH,
+        FLUSH_CMD_SHUTDOWN,
+    };
+
     // Once the current flush is complete this indicates whether the
     // notion of time has changed.
     bool mTimeDiscontinuityPending;
@@ -213,6 +218,9 @@
     void handleFlushComplete(bool audio, bool isDecoder);
     void finishFlushIfPossible();
 
+    void onStart();
+    void onResume();
+
     bool audioDecoderStillNeeded();
 
     void flushDecoder(
@@ -227,8 +235,7 @@
     void processDeferredActions();
 
     void performSeek(int64_t seekTimeUs, bool needNotify);
-    void performDecoderFlush();
-    void performDecoderShutdown(bool audio, bool video);
+    void performDecoderFlush(FlushCommand audio, FlushCommand video);
     void performReset();
     void performScanSources();
     void performSetSurface(const sp<NativeWindowWrapper> &wrapper);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index ab46074..b9a1a6c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -239,16 +239,24 @@
             // fall through
         }
 
+        case STATE_PAUSED:
+        case STATE_STOPPED_AND_PREPARED:
+        {
+            if (mAtEOS && mStartupSeekTimeUs < 0) {
+                mStartupSeekTimeUs = 0;
+                mPositionUs = -1;
+            }
+
+            // fall through
+        }
+
         case STATE_PREPARED:
         {
             mAtEOS = false;
             mPlayer->start();
 
             if (mStartupSeekTimeUs >= 0) {
-                if (mStartupSeekTimeUs > 0) {
-                    mPlayer->seekToAsync(mStartupSeekTimeUs);
-                }
-
+                mPlayer->seekToAsync(mStartupSeekTimeUs);
                 mStartupSeekTimeUs = -1;
             }
             break;
@@ -264,20 +272,6 @@
             break;
         }
 
-        case STATE_PAUSED:
-        case STATE_STOPPED_AND_PREPARED:
-        {
-            if (mAtEOS) {
-                mPlayer->seekToAsync(0);
-                mAtEOS = false;
-                mPlayer->resume();
-                mPositionUs = -1;
-            } else {
-                mPlayer->resume();
-            }
-            break;
-        }
-
         default:
             return INVALID_OPERATION;
     }
@@ -348,6 +342,7 @@
 
     switch (mState) {
         case STATE_PREPARED:
+        case STATE_STOPPED_AND_PREPARED:
         {
             mStartupSeekTimeUs = seekTimeUs;
             // pretend that the seek completed. It will actually happen when starting playback.
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 638d9bc..d9e3ee8 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -38,7 +38,7 @@
 
 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
 // is closed to allow the audio DSP to power down.
-static const int64_t kOffloadPauseMaxUs = 60000000ll;
+static const int64_t kOffloadPauseMaxUs = 10000000ll;
 
 // static
 const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
@@ -68,6 +68,8 @@
       mAudioFirstAnchorTimeMediaUs(-1),
       mAnchorTimeMediaUs(-1),
       mAnchorTimeRealUs(-1),
+      mAnchorNumFramesWritten(-1),
+      mAnchorMaxMediaUs(-1),
       mVideoLateByUs(0ll),
       mHasAudio(false),
       mHasVideo(false),
@@ -173,7 +175,8 @@
     return getCurrentPosition(mediaUs, ALooper::GetNowUs());
 }
 
-status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs, int64_t nowUs) {
+status_t NuPlayer::Renderer::getCurrentPosition(
+        int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo) {
     Mutex::Autolock autoLock(mTimeLock);
     if (!mHasAudio && !mHasVideo) {
         return NO_INIT;
@@ -182,12 +185,21 @@
     if (mAnchorTimeMediaUs < 0) {
         return NO_INIT;
     }
+
     int64_t positionUs = (nowUs - mAnchorTimeRealUs) + mAnchorTimeMediaUs;
 
     if (mPauseStartedTimeRealUs != -1) {
         positionUs -= (nowUs - mPauseStartedTimeRealUs);
     }
 
+    // Limit the position to the last queued media time (for a video-only stream the
+    // position will be discrete, as we don't know how long each frame lasts).
+    if (mAnchorMaxMediaUs >= 0 && !allowPastQueuedVideo) {
+        if (positionUs > mAnchorMaxMediaUs) {
+            positionUs = mAnchorMaxMediaUs;
+        }
+    }
+
     if (positionUs < mAudioFirstAnchorTimeMediaUs) {
         positionUs = mAudioFirstAnchorTimeMediaUs;
     }
@@ -217,10 +229,12 @@
     }
 }
 
-void NuPlayer::Renderer::setAnchorTime(int64_t mediaUs, int64_t realUs, bool resume) {
+void NuPlayer::Renderer::setAnchorTime(
+        int64_t mediaUs, int64_t realUs, int64_t numFramesWritten, bool resume) {
     Mutex::Autolock autoLock(mTimeLock);
     mAnchorTimeMediaUs = mediaUs;
     mAnchorTimeRealUs = realUs;
+    mAnchorNumFramesWritten = numFramesWritten;
     if (resume) {
         mPauseStartedTimeRealUs = -1;
     }
@@ -541,7 +555,7 @@
             int64_t mediaTimeUs;
             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
             ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
-            onNewAudioMediaTime(mediaTimeUs);
+            setAudioFirstAnchorTimeIfNeeded(mediaTimeUs);
         }
 
         size_t copy = entry->mBuffer->size() - entry->mOffset;
@@ -564,6 +578,14 @@
         notifyIfMediaRenderingStarted();
     }
 
+    if (mAudioFirstAnchorTimeMediaUs >= 0) {
+        int64_t nowUs = ALooper::GetNowUs();
+        setAnchorTime(mAudioFirstAnchorTimeMediaUs, nowUs - getPlayedOutAudioDurationUs(nowUs));
+    }
+
+    // we don't know how much data we are queueing for offloaded tracks
+    mAnchorMaxMediaUs = -1;
+
     if (hasEOS) {
         (new AMessage(kWhatStopAudioSink, id()))->post();
     }
@@ -607,6 +629,10 @@
 
             mAudioQueue.erase(mAudioQueue.begin());
             entry = NULL;
+            // Need to stop the track here, because that will play out the last
+            // little bit at the end of the file. Otherwise short files won't play.
+            mAudioSink->stop();
+            mNumFramesWritten = 0;
             return false;
         }
 
@@ -663,6 +689,11 @@
             break;
         }
     }
+    mAnchorMaxMediaUs =
+        mAnchorTimeMediaUs +
+                (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
+                        * 1000LL * mAudioSink->msecsPerFrame());
+
     return !mAudioQueue.empty();
 }
 
@@ -674,7 +705,7 @@
 
 int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
     int64_t currentPositionUs;
-    if (getCurrentPosition(&currentPositionUs, nowUs) != OK) {
+    if (getCurrentPosition(&currentPositionUs, nowUs, true /* allowPastQueuedVideo */) != OK) {
         // If failed to get current position, e.g. due to audio clock is not ready, then just
         // play out video immediately without delay.
         return nowUs;
@@ -690,7 +721,8 @@
     }
     setAudioFirstAnchorTimeIfNeeded(mediaTimeUs);
     int64_t nowUs = ALooper::GetNowUs();
-    setAnchorTime(mediaTimeUs, nowUs + getPendingAudioPlayoutDurationUs(nowUs));
+    setAnchorTime(
+            mediaTimeUs, nowUs + getPendingAudioPlayoutDurationUs(nowUs), mNumFramesWritten);
 }
 
 void NuPlayer::Renderer::postDrainVideoQueue() {
@@ -733,6 +765,9 @@
         } else {
             realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
         }
+        if (!mHasAudio) {
+            mAnchorMaxMediaUs = mediaTimeUs + 100000; // smooth out videos >= 10fps
+        }
 
         // Heuristics to handle situation when media time changed without a
         // discontinuity. If we have not drained an audio buffer that was
@@ -1103,6 +1138,7 @@
     }
     CHECK(!mDrainAudioQueuePending);
     mNumFramesWritten = 0;
+    mAnchorNumFramesWritten = -1;
     uint32_t written;
     if (mAudioSink->getFramesWritten(&written) == OK) {
         mNumFramesWritten = written;
@@ -1158,7 +1194,8 @@
     if (mPauseStartedTimeRealUs != -1) {
         int64_t newAnchorRealUs =
             mAnchorTimeRealUs + ALooper::GetNowUs() - mPauseStartedTimeRealUs;
-        setAnchorTime(mAnchorTimeMediaUs, newAnchorRealUs, true /* resume */);
+        setAnchorTime(
+                mAnchorTimeMediaUs, newAnchorRealUs, mAnchorNumFramesWritten, true /* resume */);
     }
 
     if (!mAudioQueue.empty()) {
@@ -1341,8 +1378,9 @@
                 return offloadingAudio();
             }
             ALOGV("openAudioSink: try to open AudioSink in offload mode");
-            flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
-            flags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+            uint32_t offloadFlags = flags;
+            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
             audioSinkChanged = true;
             mAudioSink->close();
             err = mAudioSink->open(
@@ -1353,7 +1391,7 @@
                     8 /* bufferCount */,
                     &NuPlayer::Renderer::AudioSinkCallback,
                     this,
-                    (audio_output_flags_t)flags,
+                    (audio_output_flags_t)offloadFlags,
                     &offloadInfo);
 
             if (err == OK) {
@@ -1377,9 +1415,9 @@
         }
     }
     if (!offloadOnly && !offloadingAudio()) {
-        flags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
         ALOGV("openAudioSink: open AudioSink in NON-offload mode");
-
+        uint32_t pcmFlags = flags;
+        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
         audioSinkChanged = true;
         mAudioSink->close();
         mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
@@ -1391,7 +1429,7 @@
                     8 /* bufferCount */,
                     NULL,
                     NULL,
-                    (audio_output_flags_t)flags),
+                    (audio_output_flags_t)pcmFlags),
                  (status_t)OK);
         mAudioSink->start();
     }
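
Editor's note on the NuPlayerRenderer.cpp changes above: the current position is derived from a media/real-time anchor and, with this patch, additionally clamped to the last queued media time (mAnchorMaxMediaUs). A minimal standalone sketch of that arithmetic follows; names and the struct are hypothetical, not the renderer's API:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for the anchor state kept by a renderer.
    struct Anchor {
        int64_t mediaUs;          // media time at the anchor
        int64_t realUs;           // wall-clock time at the anchor
        int64_t maxMediaUs;       // last queued media time, or -1 if unknown
        int64_t pauseStartRealUs; // -1 when not paused
    };

    // Position = anchor media time + elapsed real time, minus paused time,
    // clamped so it never runs past what has actually been queued.
    int64_t currentPositionUs(const Anchor& a, int64_t nowUs,
                              bool allowPastQueued = false) {
        int64_t posUs = (nowUs - a.realUs) + a.mediaUs;
        if (a.pauseStartRealUs != -1) {
            posUs -= (nowUs - a.pauseStartRealUs);
        }
        if (!allowPastQueued && a.maxMediaUs >= 0) {
            posUs = std::min(posUs, a.maxMediaUs);
        }
        return posUs;
    }

    int main() {
        Anchor a{ 1'000'000, 50'000'000, 1'100'000, -1 };
        // 200 ms after the anchor, but only 100 ms of media is queued.
        printf("%lld\n", (long long)currentPositionUs(a, 50'200'000)); // 1100000
        return 0;
    }
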
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index b15a266..7b46a59 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -61,11 +61,13 @@
 
     // Following setters and getters are protected by mTimeLock.
     status_t getCurrentPosition(int64_t *mediaUs);
-    status_t getCurrentPosition(int64_t *mediaUs, int64_t nowUs);
+    status_t getCurrentPosition(
+            int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo = false);
     void setHasMedia(bool audio);
     void setAudioFirstAnchorTime(int64_t mediaUs);
     void setAudioFirstAnchorTimeIfNeeded(int64_t mediaUs);
-    void setAnchorTime(int64_t mediaUs, int64_t realUs, bool resume = false);
+    void setAnchorTime(
+            int64_t mediaUs, int64_t realUs, int64_t numFramesWritten = -1, bool resume = false);
     void setVideoLateByUs(int64_t lateUs);
     int64_t getVideoLateByUs();
     void setPauseStartedTimeRealUs(int64_t realUs);
@@ -148,6 +150,8 @@
     int64_t mAudioFirstAnchorTimeMediaUs;
     int64_t mAnchorTimeMediaUs;
     int64_t mAnchorTimeRealUs;
+    int64_t mAnchorNumFramesWritten;
+    int64_t mAnchorMaxMediaUs;
     int64_t mVideoLateByUs;
     bool mHasAudio;
     bool mHasVideo;
@@ -174,6 +178,7 @@
     int32_t mTotalBuffersQueued;
     int32_t mLastAudioBufferDrained;
 
+
     size_t fillAudioBuffer(void *buffer, size_t size);
 
     bool onDrainAudioQueue();
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index ffacb8f..52ae9ee 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -506,7 +506,7 @@
             sp<AnotherPacketSource> source = info->mSource;
             if (source != NULL) {
                 source->queueDiscontinuity(
-                        ATSParser::DISCONTINUITY_SEEK,
+                        ATSParser::DISCONTINUITY_TIME,
                         NULL,
                         true /* discard */);
             }
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index 2e9a29a..27f5159 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -80,7 +80,7 @@
             mFinalResult = ERROR_END_OF_STREAM;
             break;
         } else if (n == INFO_DISCONTINUITY) {
-            int32_t type = ATSParser::DISCONTINUITY_SEEK;
+            int32_t type = ATSParser::DISCONTINUITY_TIME;
 
             int32_t mask;
             if (extra != NULL
@@ -118,7 +118,7 @@
 
                 mTSParser->signalDiscontinuity(
                         ((type & 1) == 0)
-                            ? ATSParser::DISCONTINUITY_SEEK
+                            ? ATSParser::DISCONTINUITY_TIME
                             : ATSParser::DISCONTINUITY_FORMATCHANGE,
                         extra);
             } else {
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index fcc3a5a..4c8a199 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -498,6 +498,10 @@
     sp<AMessage> msg = new AMessage(kWhatShutdown, id());
     msg->setInt32("keepComponentAllocated", keepComponentAllocated);
     msg->post();
+    if (!keepComponentAllocated) {
+        // ensure shutdown completes in 3 seconds
+        (new AMessage(kWhatReleaseCodecInstance, id()))->post(3000000);
+    }
 }
 
 void ACodec::signalRequestIDRFrame() {
@@ -2217,7 +2221,11 @@
 
     video_def->xFramerate = (OMX_U32)(frameRate * 65536.0f);
     video_def->eCompressionFormat = OMX_VIDEO_CodingUnused;
-    video_def->eColorFormat = colorFormat;
+    // this is redundant as it was already set up in setVideoPortFormatType
+    // FIXME for now skip this only for flexible YUV formats
+    if (colorFormat != OMX_COLOR_FormatYUV420Flexible) {
+        video_def->eColorFormat = colorFormat;
+    }
 
     err = mOMX->setParameter(
             mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
@@ -3797,6 +3805,19 @@
             break;
         }
 
+        case ACodec::kWhatReleaseCodecInstance:
+        {
+            ALOGI("[%s] forcing the release of codec",
+                    mCodec->mComponentName.c_str());
+            status_t err = mCodec->mOMX->freeNode(mCodec->mNode);
+            ALOGE_IF(err != OK, "[%s] failed to release codec instance: err=%d",
+                       mCodec->mComponentName.c_str(), err);
+            sp<AMessage> notify = mCodec->mNotify->dup();
+            notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
+            notify->post();
+            break;
+        }
+
         default:
             return false;
     }
@@ -4456,6 +4477,13 @@
             break;
         }
 
+        case ACodec::kWhatReleaseCodecInstance:
+        {
+            // nothing to do, as we have already signaled shutdown
+            handled = true;
+            break;
+        }
+
         default:
             return BaseState::onMessageReceived(msg);
     }
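
Editor's note on the ACodec.cpp changes above: initiating a full shutdown now also posts a delayed kWhatReleaseCodecInstance message, so a shutdown that never completes still frees the component after roughly three seconds. A generic sketch of that watchdog pattern follows, using std::thread and std::condition_variable rather than the ALooper/AMessage machinery; all names are illustrative:

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Generic shutdown watchdog: if shutdown does not complete within the
    // timeout, force the release path.
    class ShutdownWatchdog {
    public:
        void arm(std::chrono::seconds timeout) {
            mThread = std::thread([this, timeout] {
                std::unique_lock<std::mutex> lock(mMutex);
                if (!mCv.wait_for(lock, timeout, [this] { return mDone; })) {
                    printf("shutdown timed out, forcing release\n");
                    // e.g. free the component node here
                }
            });
        }
        void shutdownCompleted() {
            { std::lock_guard<std::mutex> lock(mMutex); mDone = true; }
            mCv.notify_one();
        }
        ~ShutdownWatchdog() { if (mThread.joinable()) mThread.join(); }
    private:
        std::mutex mMutex;
        std::condition_variable mCv;
        bool mDone = false;
        std::thread mThread;
    };

    int main() {
        ShutdownWatchdog w;
        w.arm(std::chrono::seconds(3));
        // Normally the regular shutdown path completes before the timeout:
        w.shutdownCompleted();
        return 0;
    }
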
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index ab8ac79..6a56729 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -75,7 +75,7 @@
 
 // maximum time in paused state when offloading audio decompression. When elapsed, the AudioPlayer
 // is destroyed to allow the audio DSP to power down.
-static int64_t kOffloadPauseMaxUs = 60000000ll;
+static int64_t kOffloadPauseMaxUs = 10000000ll;
 
 
 struct AwesomeEvent : public TimedEventQueue::Event {
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 2b50763..c3a940a 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -130,6 +130,7 @@
          "CameraSource::getColorFormat", colorFormat);
 
     CHECK(!"Unknown color format");
+    return -1;
 }
 
 CameraSource *CameraSource::Create(const String16 &clientName) {
@@ -677,63 +678,80 @@
 
 void CameraSource::releaseCamera() {
     ALOGV("releaseCamera");
-    if (mCamera != 0) {
-        int64_t token = IPCThreadState::self()->clearCallingIdentity();
-        if ((mCameraFlags & FLAGS_HOT_CAMERA) == 0) {
-            ALOGV("Camera was cold when we started, stopping preview");
-            mCamera->stopPreview();
-            mCamera->disconnect();
-        }
-        mCamera->unlock();
+    sp<Camera> camera;
+    bool coldCamera = false;
+    {
+        Mutex::Autolock autoLock(mLock);
+        // get a local ref and clear ref to mCamera now
+        camera = mCamera;
         mCamera.clear();
-        mCamera = 0;
+        coldCamera = (mCameraFlags & FLAGS_HOT_CAMERA) == 0;
+    }
+
+    if (camera != 0) {
+        int64_t token = IPCThreadState::self()->clearCallingIdentity();
+        if (coldCamera) {
+            ALOGV("Camera was cold when we started, stopping preview");
+            camera->stopPreview();
+            camera->disconnect();
+        }
+        camera->unlock();
         IPCThreadState::self()->restoreCallingIdentity(token);
     }
-    if (mCameraRecordingProxy != 0) {
-        mCameraRecordingProxy->asBinder()->unlinkToDeath(mDeathNotifier);
-        mCameraRecordingProxy.clear();
+
+    {
+        Mutex::Autolock autoLock(mLock);
+        if (mCameraRecordingProxy != 0) {
+            mCameraRecordingProxy->asBinder()->unlinkToDeath(mDeathNotifier);
+            mCameraRecordingProxy.clear();
+        }
+        mCameraFlags = 0;
     }
-    mCameraFlags = 0;
 }
 
 status_t CameraSource::reset() {
     ALOGD("reset: E");
-    Mutex::Autolock autoLock(mLock);
-    mStarted = false;
-    mFrameAvailableCondition.signal();
 
-    int64_t token;
-    bool isTokenValid = false;
-    if (mCamera != 0) {
-        token = IPCThreadState::self()->clearCallingIdentity();
-        isTokenValid = true;
-    }
-    releaseQueuedFrames();
-    while (!mFramesBeingEncoded.empty()) {
-        if (NO_ERROR !=
-            mFrameCompleteCondition.waitRelative(mLock,
-                    mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
-            ALOGW("Timed out waiting for outstanding frames being encoded: %zu",
-                mFramesBeingEncoded.size());
+    {
+        Mutex::Autolock autoLock(mLock);
+        mStarted = false;
+        mFrameAvailableCondition.signal();
+
+        int64_t token;
+        bool isTokenValid = false;
+        if (mCamera != 0) {
+            token = IPCThreadState::self()->clearCallingIdentity();
+            isTokenValid = true;
         }
+        releaseQueuedFrames();
+        while (!mFramesBeingEncoded.empty()) {
+            if (NO_ERROR !=
+                mFrameCompleteCondition.waitRelative(mLock,
+                        mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
+                ALOGW("Timed out waiting for outstanding frames being encoded: %zu",
+                    mFramesBeingEncoded.size());
+            }
+        }
+        stopCameraRecording();
+        if (isTokenValid) {
+            IPCThreadState::self()->restoreCallingIdentity(token);
+        }
+
+        if (mCollectStats) {
+            ALOGI("Frames received/encoded/dropped: %d/%d/%d in %" PRId64 " us",
+                    mNumFramesReceived, mNumFramesEncoded, mNumFramesDropped,
+                    mLastFrameTimestampUs - mFirstFrameTimeUs);
+        }
+
+        if (mNumGlitches > 0) {
+            ALOGW("%d long delays between neighboring video frames", mNumGlitches);
+        }
+
+        CHECK_EQ(mNumFramesReceived, mNumFramesEncoded + mNumFramesDropped);
     }
-    stopCameraRecording();
+
     releaseCamera();
-    if (isTokenValid) {
-        IPCThreadState::self()->restoreCallingIdentity(token);
-    }
 
-    if (mCollectStats) {
-        ALOGI("Frames received/encoded/dropped: %d/%d/%d in %" PRId64 " us",
-                mNumFramesReceived, mNumFramesEncoded, mNumFramesDropped,
-                mLastFrameTimestampUs - mFirstFrameTimeUs);
-    }
-
-    if (mNumGlitches > 0) {
-        ALOGW("%d long delays between neighboring video frames", mNumGlitches);
-    }
-
-    CHECK_EQ(mNumFramesReceived, mNumFramesEncoded + mNumFramesDropped);
     ALOGD("reset: X");
     return OK;
 }
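
Editor's note on the CameraSource.cpp changes above: releaseCamera() now takes a local strong reference and clears the member while holding mLock, then issues the potentially blocking camera calls with the lock released. A generic sketch of that pattern follows, using std::shared_ptr and a hypothetical Camera type instead of the Android sp<>/binder types:

    #include <cstdio>
    #include <memory>
    #include <mutex>

    struct Camera {                     // hypothetical remote object
        void stopPreview() { printf("stopPreview\n"); }
        void disconnect()  { printf("disconnect\n"); }
        void unlock()      { printf("unlock\n"); }
    };

    class Source {
    public:
        Source() : mCamera(std::make_shared<Camera>()), mColdCamera(true) {}

        void releaseCamera() {
            std::shared_ptr<Camera> camera;
            bool coldCamera = false;
            {
                // Grab a local reference and drop the member under the lock...
                std::lock_guard<std::mutex> lock(mLock);
                camera = std::move(mCamera);
                coldCamera = mColdCamera;
            }
            // ...then make the potentially blocking calls without holding it.
            if (camera) {
                if (coldCamera) {
                    camera->stopPreview();
                    camera->disconnect();
                }
                camera->unlock();
            }
        }
    private:
        std::mutex mLock;
        std::shared_ptr<Camera> mCamera;
        bool mColdCamera;
    };

    int main() { Source s; s.releaseCamera(); return 0; }
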
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 4b8440b..9f20b1d 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -41,8 +41,13 @@
 
 #include "include/ESDS.h"
 
+
+#ifndef __predict_false
+#define __predict_false(exp) __builtin_expect((exp) != 0, 0)
+#endif
+
 #define WARN_UNLESS(condition, message, ...) \
-( (CONDITION(condition)) ? false : ({ \
+( (__predict_false(condition)) ? false : ({ \
     ALOGW("Condition %s failed "  message, #condition, ##__VA_ARGS__); \
     true; \
 }))
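
Editor's note on the MPEG4Writer.cpp hunk above: __predict_false is defined around the compiler builtin __builtin_expect so the warning branch of WARN_UNLESS is hinted as unlikely. The following is a small standalone illustration of the same idea (a simplified variant that hints the failure branch as unlikely, not a byte-for-byte copy of the macro above; fprintf stands in for ALOGW; __builtin_expect and ##__VA_ARGS__ are GCC/Clang extensions):

    #include <cstdio>

    #ifndef __predict_false
    #define __predict_false(exp) __builtin_expect((exp) != 0, 0)
    #endif

    // Evaluates to true (and warns) when the condition fails; the failing
    // branch is hinted as unlikely so the happy path stays cheap.
    #define WARN_UNLESS(condition, message, ...)                               \
        ((__predict_false(!(condition)))                                       \
             ? (fprintf(stderr, "Condition %s failed " message "\n",           \
                        #condition, ##__VA_ARGS__), true)                      \
             : false)

    int main() {
        int durationUs = -5;
        if (WARN_UNLESS(durationUs >= 0, "(%d) is negative", durationUs)) {
            // caller decides how to recover; here we just clamp
            durationUs = 0;
        }
        return durationUs;
    }
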
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 5f55484..d7ddc89 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -738,6 +738,7 @@
                             err, actionCode, mState);
                     if (err == DEAD_OBJECT) {
                         mFlags |= kFlagSawMediaServerDie;
+                        mFlags &= ~kFlagIsComponentAllocated;
                     }
 
                     bool sendErrorResponse = true;
@@ -863,6 +864,7 @@
                 {
                     CHECK_EQ(mState, INITIALIZING);
                     setState(INITIALIZED);
+                    mFlags |= kFlagIsComponentAllocated;
 
                     CHECK(msg->findString("componentName", &mComponentName));
 
@@ -1009,6 +1011,16 @@
                         mFlags |= kFlagOutputFormatChanged;
                         postActivityNotificationIfPossible();
                     }
+
+                    // Notify mCrypto of video resolution changes
+                    if (mCrypto != NULL) {
+                      int32_t height, width;
+                      if (mOutputFormat->findInt32("height", &height) &&
+                          mOutputFormat->findInt32("width", &width)) {
+                        mCrypto->notifyResolution(width, height);
+                      }
+                    }
+
                     break;
                 }
 
@@ -1136,6 +1148,7 @@
                         setState(UNINITIALIZED);
                         mComponentName.clear();
                     }
+                    mFlags &= ~kFlagIsComponentAllocated;
 
                     (new AMessage)->postReply(mReplyID);
                     break;
@@ -1336,9 +1349,10 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (mState != INITIALIZED
+            if (!(mFlags & kFlagIsComponentAllocated) && mState != INITIALIZED
                     && mState != CONFIGURED && !isExecuting()) {
-                // We may be in "UNINITIALIZED" state already without the
+                // We may already be in "UNINITIALIZED" state and may also have
+                // shut down the encoder/decoder without the
                 // client being aware of this if media server died while
                 // we were being stopped. The client would assume that
                 // after stop() returned, it would be safe to call release()
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index a8806c8..288e07a 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -817,6 +817,7 @@
             CHECK(!"Should not be here. Unsupported color format.");
             break;
     }
+    return 0;
 }
 
 status_t OMXCodec::findTargetColorFormat(
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 4e1c65c..530383b 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -448,7 +448,7 @@
 }
 
 // Part of the BufferQueue::ConsumerListener
-void SurfaceMediaSource::onFrameAvailable() {
+void SurfaceMediaSource::onFrameAvailable(const BufferItem& /* item */) {
     ALOGV("onFrameAvailable");
 
     sp<FrameAvailableListener> listener;
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 40925fd..351ba1e 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -929,33 +929,22 @@
         }
 
         if (mEndOfInput) {
-            if (outputDelayRingBufferSamplesAvailable() > 0
-                    && outputDelayRingBufferSamplesAvailable()
-                            < mStreamInfo->frameSize * mStreamInfo->numChannels) {
-                ALOGE("not a complete frame of samples available");
-                mSignalledError = true;
-                notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
-                return;
-            }
-
-            if (mEndOfInput && !outQueue.empty() && outputDelayRingBufferSamplesAvailable() == 0) {
+            int ringBufAvail = outputDelayRingBufferSamplesAvailable();
+            if (!outQueue.empty()
+                    && ringBufAvail < mStreamInfo->frameSize * mStreamInfo->numChannels) {
                 if (!mEndOfOutput) {
-                    // send empty block signaling EOS
+                    // send partial or empty block signaling EOS
                     mEndOfOutput = true;
                     BufferInfo *outInfo = *outQueue.begin();
                     OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
 
-                    if (outHeader->nOffset != 0) {
-                        ALOGE("outHeader->nOffset != 0 is not handled");
-                        mSignalledError = true;
-                        notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
-                        return;
-                    }
-
                     INT_PCM *outBuffer = reinterpret_cast<INT_PCM *>(outHeader->pBuffer
                             + outHeader->nOffset);
-                    int32_t ns = 0;
-                    outHeader->nFilledLen = 0;
+                    int32_t ns = outputDelayRingBufferGetSamples(outBuffer, ringBufAvail);
+                    if (ns < 0) {
+                        ns = 0;
+                    }
+                    outHeader->nFilledLen = ns;
                     outHeader->nFlags = OMX_BUFFERFLAG_EOS;
 
                     outHeader->nTimeStamp = mBufferTimestamps.itemAt(0);
@@ -994,7 +983,7 @@
             }
             int32_t ns = outputDelayRingBufferGetSamples(0, avail);
             if (ns != avail) {
-                ALOGE("not a complete frame of samples available");
+                ALOGW("not a complete frame of samples available");
                 break;
             }
             mOutputBufferCount++;
diff --git a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
index 1513b0b..cfc37b7 100644
--- a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
+++ b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
@@ -575,9 +575,13 @@
                     &editPortInfo(0)->mDef;
                 portDef->format.video.nFrameWidth = mVideoWidth;
                 portDef->format.video.nFrameHeight = mVideoHeight;
+                portDef->format.video.nStride = portDef->format.video.nFrameWidth;
+                portDef->format.video.nSliceHeight = portDef->format.video.nFrameHeight;
                 portDef->format.video.xFramerate = def->format.video.xFramerate;
                 portDef->format.video.eColorFormat =
                     (OMX_COLOR_FORMATTYPE) mVideoColorFormat;
+                portDef->nBufferSize =
+                    (portDef->format.video.nStride * portDef->format.video.nSliceHeight * 3) / 2;
                 portDef = &editPortInfo(1)->mDef;
                 portDef->format.video.nFrameWidth = mVideoWidth;
                 portDef->format.video.nFrameHeight = mVideoHeight;
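
Editor's note on the encoder hunks in this patch (SoftAVCEncoder, SoftMPEG4Encoder, SoftVPXEncoder): the input port's nBufferSize is recomputed as stride * sliceHeight * 3 / 2, i.e. the size of one planar YUV 4:2:0 frame, after stride and sliceHeight are set to the new width and height. A tiny worked sketch of that calculation (assuming stride == width and sliceHeight == height, as the hunks set them):

    #include <cstdint>
    #include <cstdio>

    // One YUV 4:2:0 frame: full-resolution Y plane plus quarter-resolution
    // U and V planes => stride * sliceHeight * 3 / 2 bytes.
    constexpr size_t yuv420FrameSize(size_t stride, size_t sliceHeight) {
        return (stride * sliceHeight * 3) / 2;
    }

    int main() {
        // e.g. a 1280x720 input buffer
        printf("%zu\n", yuv420FrameSize(1280, 720)); // 1382400
        return 0;
    }
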
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index 1f4b6fd..e399984 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -382,5 +382,6 @@
     } else {
         CHECK(!"Unknown component");
     }
+    return NULL;
 }
 
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index 28edff8..1d0a2f0 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -462,9 +462,13 @@
                     &editPortInfo(0)->mDef;
                 portDef->format.video.nFrameWidth = mVideoWidth;
                 portDef->format.video.nFrameHeight = mVideoHeight;
+                portDef->format.video.nStride = portDef->format.video.nFrameWidth;
+                portDef->format.video.nSliceHeight = portDef->format.video.nFrameHeight;
                 portDef->format.video.xFramerate = def->format.video.xFramerate;
                 portDef->format.video.eColorFormat =
                     (OMX_COLOR_FORMATTYPE) mVideoColorFormat;
+                portDef->nBufferSize =
+                    (portDef->format.video.nStride * portDef->format.video.nSliceHeight * 3) / 2;
                 portDef = &editPortInfo(1)->mDef;
                 portDef->format.video.nFrameWidth = mVideoWidth;
                 portDef->format.video.nFrameHeight = mVideoHeight;
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 828577a..87d6961 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -189,4 +189,5 @@
     } else {
         CHECK(!"Unknown component");
     }
+    return NULL;
 }
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index eb621d5..0285feb 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -805,8 +805,12 @@
         OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
         def->format.video.nFrameWidth = mWidth;
         def->format.video.nFrameHeight = mHeight;
+        def->format.video.nStride = def->format.video.nFrameWidth;
+        def->format.video.nSliceHeight = def->format.video.nFrameHeight;
         def->format.video.xFramerate = mFramerate;
         def->format.video.eColorFormat = mColorFormat;
+        def->nBufferSize =
+            (def->format.video.nStride * def->format.video.nSliceHeight * 3) / 2;
         def = &editPortInfo(kOutputPortIndex)->mDef;
         def->format.video.nFrameWidth = mWidth;
         def->format.video.nFrameHeight = mHeight;
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index fba6b09..874c118 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -1458,7 +1458,7 @@
                     extra->setInt64("timeUs", timeUs);
                     discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
                     discontinuityQueue->queueDiscontinuity(
-                            ATSParser::DISCONTINUITY_SEEK, extra, true);
+                            ATSParser::DISCONTINUITY_TIME, extra, true);
                 } else {
                     int32_t type;
                     int64_t srcSegmentStartTimeUs;
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 30fa868..eb3154a 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -1155,7 +1155,7 @@
         extra->setInt64(IStreamListener::kKeyMediaTimeUs, 0);
 
         mTSParser->signalDiscontinuity(
-                ATSParser::DISCONTINUITY_SEEK, extra);
+                ATSParser::DISCONTINUITY_TIME, extra);
 
         mAbsoluteTimeAnchorUs = mNextPTSTimeUs;
         mNextPTSTimeUs = -1ll;
@@ -1587,6 +1587,7 @@
                 mStartTimeUsNotify->setInt32("streamMask", LiveSession::STREAMTYPE_AUDIO);
                 mStartTimeUsNotify->post();
                 mStartTimeUsNotify.clear();
+                mStartup = false;  // startup time notification sent; leave startup mode
             }
         }
 
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index 2587ec7..4f0862c 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -413,16 +413,16 @@
 
     const mkvparser::CuePoint* pCP;
     mkvparser::Tracks const *pTracks = pSegment->GetTracks();
-    unsigned long int trackCount = pTracks->GetTracksCount();
     while (!pCues->DoneParsing()) {
         pCues->LoadCuePoint();
         pCP = pCues->GetLast();
         CHECK(pCP);
 
+        size_t trackCount = mExtractor->mTracks.size();
         for (size_t index = 0; index < trackCount; ++index) {
-            const mkvparser::Track *pTrack = pTracks->GetTrackByIndex(index);
+            MatroskaExtractor::TrackInfo& track = mExtractor->mTracks.editItemAt(index);
+            const mkvparser::Track *pTrack = pTracks->GetTrackByNumber(track.mTrackNum);
             if (pTrack && pTrack->GetType() == 1 && pCP->Find(pTrack)) { // VIDEO_TRACK
-                MatroskaExtractor::TrackInfo& track = mExtractor->mTracks.editItemAt(index);
                 track.mCuePoints.push_back(pCP);
             }
         }
@@ -434,12 +434,13 @@
     }
 
     const mkvparser::CuePoint::TrackPosition *pTP = NULL;
-    const mkvparser::Track *thisTrack = pTracks->GetTrackByIndex(mIndex);
+    const mkvparser::Track *thisTrack = pTracks->GetTrackByNumber(mTrackNum);
     if (thisTrack->GetType() == 1) { // video
         MatroskaExtractor::TrackInfo& track = mExtractor->mTracks.editItemAt(mIndex);
         pTP = track.find(seekTimeNs);
     } else {
         // The Cue index is built around video keyframes
+        unsigned long int trackCount = pTracks->GetTracksCount();
         for (size_t index = 0; index < trackCount; ++index) {
             const mkvparser::Track *pTrack = pTracks->GetTrackByIndex(index);
             if (pTrack && pTrack->GetType() == 1 && pCues->Find(seekTimeNs, pTrack, pCP, pTP)) {
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 6d8866a..eab7616 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -244,11 +244,16 @@
 status_t ATSParser::Program::parseProgramMap(ABitReader *br) {
     unsigned table_id = br->getBits(8);
     ALOGV("  table_id = %u", table_id);
-    CHECK_EQ(table_id, 0x02u);
-
+    if (table_id != 0x02u) {
+        ALOGE("PMT data error!");
+        return ERROR_MALFORMED;
+    }
     unsigned section_syntax_indicator = br->getBits(1);
     ALOGV("  section_syntax_indicator = %u", section_syntax_indicator);
-    CHECK_EQ(section_syntax_indicator, 1u);
+    if (section_syntax_indicator != 1u) {
+        ALOGE("PMT data error!");
+        return ERROR_MALFORMED;
+    }
 
     CHECK_EQ(br->getBits(1), 0u);
     MY_LOGV("  reserved = %u", br->getBits(2));
@@ -739,8 +744,10 @@
         if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
             CHECK_GE(optional_bytes_remaining, 5u);
 
-            CHECK_EQ(br->getBits(4), PTS_DTS_flags);
-
+            if (br->getBits(4) != PTS_DTS_flags) {
+                ALOGE("PES data Error!");
+                return ERROR_MALFORMED;
+            }
             PTS = ((uint64_t)br->getBits(3)) << 30;
             CHECK_EQ(br->getBits(1), 1u);
             PTS |= ((uint64_t)br->getBits(15)) << 15;
@@ -1003,8 +1010,10 @@
 void ATSParser::parseProgramAssociationTable(ABitReader *br) {
     unsigned table_id = br->getBits(8);
     ALOGV("  table_id = %u", table_id);
-    CHECK_EQ(table_id, 0x00u);
-
+    if (table_id != 0x00u) {
+        ALOGE("PAT data error!");
+        return;
+    }
     unsigned section_syntax_indictor = br->getBits(1);
     ALOGV("  section_syntax_indictor = %u", section_syntax_indictor);
     CHECK_EQ(section_syntax_indictor, 1u);
@@ -1074,7 +1083,9 @@
         sp<PSISection> section = mPSISections.valueAt(sectionIndex);
 
         if (payload_unit_start_indicator) {
-            CHECK(section->isEmpty());
+            if (!section->isEmpty()) {
+                return ERROR_UNSUPPORTED;
+            }
 
             unsigned skip = br->getBits(8);
             br->skipBits(skip * 8);
@@ -1203,7 +1214,10 @@
     ALOGV("---");
 
     unsigned sync_byte = br->getBits(8);
-    CHECK_EQ(sync_byte, 0x47u);
+    if (sync_byte != 0x47u) {
+        ALOGE("[error] parseTS: return error as sync_byte=0x%x", sync_byte);
+        return BAD_VALUE;
+    }
 
     if (br->getBits(1)) {  // transport_error_indicator
         // silently ignore.
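
Editor's note on the ATSParser.cpp hunks above: several fatal CHECK_EQ assertions are replaced with error returns so malformed transport streams (bad sync byte, bad PMT/PAT table_id, bad PES flags) are reported instead of aborting the process. A minimal sketch of that validate-and-return style on a raw TS packet header follows (illustrative types and status values, not the ABitReader API):

    #include <cstdint>
    #include <cstdio>

    enum Status { OK = 0, BAD_VALUE = -1, ERROR_MALFORMED = -2 };

    // Validate the fixed parts of a 188-byte TS packet header and return an
    // error instead of asserting when the input is malformed.
    Status parseTsHeader(const uint8_t* pkt, size_t len) {
        if (len < 4) {
            return ERROR_MALFORMED;
        }
        if (pkt[0] != 0x47) {                       // sync byte
            fprintf(stderr, "bad sync_byte 0x%02x\n", pkt[0]);
            return BAD_VALUE;
        }
        uint16_t pid = ((pkt[1] & 0x1f) << 8) | pkt[2];
        printf("PID %u\n", pid);
        return OK;
    }

    int main() {
        const uint8_t good[4] = { 0x47, 0x40, 0x11, 0x10 };
        const uint8_t bad[4]  = { 0x48, 0x40, 0x11, 0x10 };
        return parseTsHeader(good, 4) == OK && parseTsHeader(bad, 4) == BAD_VALUE
               ? 0 : 1;
    }
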
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 8986a22..204934d 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -41,8 +41,6 @@
         DISCONTINUITY_ABSOLUTE_TIME     = 8,
         DISCONTINUITY_TIME_OFFSET       = 16,
 
-        DISCONTINUITY_SEEK              = DISCONTINUITY_TIME,
-
         // For legacy reasons this also implies a time discontinuity.
         DISCONTINUITY_FORMATCHANGE      =
             DISCONTINUITY_AUDIO_FORMAT
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index a03f6f9..ed40bdd 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -262,6 +262,10 @@
         }
     }
 
+    if (type == ATSParser::DISCONTINUITY_NONE) {
+        return;
+    }
+
     mEOSResult = OK;
     mLastQueuedTimeUs = 0;
     mLatestEnqueuedMeta = NULL;
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 3e70956..44c7edc 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -750,7 +750,7 @@
 }
 
 // BufferQueue::ConsumerListener callback
-void GraphicBufferSource::onFrameAvailable() {
+void GraphicBufferSource::onFrameAvailable(const BufferItem& /*item*/) {
     Mutex::Autolock autoLock(mMutex);
 
     ALOGV("onFrameAvailable exec=%d avail=%zu",
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index c0860ab..c8e3775 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -137,7 +137,7 @@
     // into the codec buffer, and call Empty[This]Buffer.  If we're not yet
     // executing or there's no codec buffer available, we just increment
     // mNumFramesAvailable and return.
-    virtual void onFrameAvailable();
+    virtual void onFrameAvailable(const BufferItem& item);
 
     // BufferQueue::ConsumerListener interface, called when the client has
     // released one or more GraphicBuffers.  We clear out the appropriate
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index d07ec14..f9c84e2 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -149,6 +149,11 @@
 status_t OMXNodeInstance::freeNode(OMXMaster *master) {
     static int32_t kMaxNumIterations = 10;
 
+    // exit if we have already freed the node
+    if (mHandle == NULL) {
+        return OK;
+    }
+
     // Transition the node from its current state all the way down
     // to "Loaded".
     // This ensures that all active buffers are properly freed even
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 970a43c..db57d0b 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -256,18 +256,23 @@
         len -= datalen;
     }
 
-    // there are <numentries> in the buffer, we need
-    // (source buffer size) + 4 + (4 * numentries) bytes for the PsshInfo structure
-    size_t newsize = buffer->size() + 4 + (4 * numentries);
+    // there are <numentries> in the source buffer, we need
+    // (source buffer size) - (sizeof(uint32_t) * numentries) + sizeof(size_t)
+    //  + ((sizeof(void*) + sizeof(size_t)) * numentries) bytes for the PsshInfo structure
+    // Or in other words, the data lengths in the source structure are replaced by size_t
+    // (which may be the same size or larger, for 64 bit), and in addition there is an
+    // extra pointer for each entry, and an extra size_t for the entire PsshInfo.
+    size_t newsize = buffer->size() - (sizeof(uint32_t) * numentries) + sizeof(size_t)
+            + ((sizeof(void*) + sizeof(size_t)) * numentries);
     ex->mPsshBuf = new ABuffer(newsize);
     ex->mPsshBuf->setRange(0, newsize);
 
     // copy data
     const uint8_t* src = buffer->data();
     uint8_t* dst = ex->mPsshBuf->data();
-    uint8_t* dstdata = dst + 4 + numentries * sizeof(PsshEntry);
-    *((uint32_t*)dst) = numentries;
-    dst += 4;
+    uint8_t* dstdata = dst + sizeof(size_t) + numentries * sizeof(PsshEntry);
+    *((size_t*)dst) = numentries;
+    dst += sizeof(size_t);
     for (size_t i = 0; i < numentries; i++) {
         // copy uuid
         memcpy(dst, src, 16);
@@ -276,14 +281,14 @@
 
         // get/copy data length
         uint32_t datalen = *((uint32_t*)src);
-        memcpy(dst, src, 4);
-        src += 4;
-        dst += 4;
+        *((size_t*)dst) = datalen;
+        src += sizeof(uint32_t);
+        dst += sizeof(size_t);
 
         // the next entry in the destination is a pointer to the actual data, which we store
         // after the array of PsshEntry
-        memcpy(dst, &dstdata, sizeof(dstdata));
-        dst += 4;
+        *((void**)dst) = dstdata;
+        dst += sizeof(void*);
 
         // copy the actual data
         memcpy(dstdata, src, datalen);
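
Editor's note on the NdkMediaExtractor.cpp hunk above: the PSSH buffer is resized because each source entry's 4-byte length becomes a size_t and gains a data pointer, plus one leading size_t for the entry count. A small sketch of just that size arithmetic follows, with a worked example (hypothetical numbers; entry layout as described in the comment above, ignoring padding concerns):

    #include <cstdint>
    #include <cstdio>

    // Source layout per entry: 16-byte UUID + uint32_t length + payload.
    // Destination layout: size_t count, then per entry
    // 16-byte UUID + size_t length + void* pointer, then the payloads.
    size_t repackedPsshSize(size_t srcSize, size_t numEntries) {
        return srcSize
               - sizeof(uint32_t) * numEntries      // drop the 32-bit lengths
               + sizeof(size_t)                     // leading entry count
               + (sizeof(void*) + sizeof(size_t)) * numEntries; // ptr + size_t len
    }

    int main() {
        // e.g. 2 entries with 20 and 40 payload bytes:
        size_t src = 2 * (16 + sizeof(uint32_t)) + 20 + 40;   // 100 bytes
        printf("%zu\n", repackedPsshSize(src, 2));
        // on a 64-bit build: 100 - 8 + 8 + 2*16 = 132
        return 0;
    }
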
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index e48af20..ea9d7d3 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -650,6 +650,7 @@
             }
         }
 
+        setAudioHwSyncForSession_l(thread, (audio_session_t)lSessionId);
     }
 
     if (lStatus != NO_ERROR) {
@@ -1604,22 +1605,69 @@
 audio_hw_sync_t AudioFlinger::getAudioHwSyncForSession(audio_session_t sessionId)
 {
     Mutex::Autolock _l(mLock);
-    for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
-        sp<PlaybackThread> thread = mPlaybackThreads.valueAt(i);
-        if ((thread->hasAudioSession(sessionId) & ThreadBase::TRACK_SESSION) != 0) {
-            // A session can only be on one thread, so exit after first match
-            String8 reply = thread->getParameters(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC));
-            AudioParameter param = AudioParameter(reply);
-            int value;
-            if (param.getInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), value) == NO_ERROR) {
-                return value;
-            }
+
+    ssize_t index = mHwAvSyncIds.indexOfKey(sessionId);
+    if (index >= 0) {
+        ALOGV("getAudioHwSyncForSession found ID %d for session %d",
+              mHwAvSyncIds.valueAt(index), sessionId);
+        return mHwAvSyncIds.valueAt(index);
+    }
+
+    audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+    if (dev == NULL) {
+        return AUDIO_HW_SYNC_INVALID;
+    }
+    char *reply = dev->get_parameters(dev, AUDIO_PARAMETER_HW_AV_SYNC);
+    AudioParameter param = AudioParameter(String8(reply));
+    free(reply);
+
+    int value;
+    if (param.getInt(String8(AUDIO_PARAMETER_HW_AV_SYNC), value) != NO_ERROR) {
+        ALOGW("getAudioHwSyncForSession error getting sync for session %d", sessionId);
+        return AUDIO_HW_SYNC_INVALID;
+    }
+
+    // allow only one session for a given HW A/V sync ID.
+    for (size_t i = 0; i < mHwAvSyncIds.size(); i++) {
+        if (mHwAvSyncIds.valueAt(i) == (audio_hw_sync_t)value) {
+            ALOGV("getAudioHwSyncForSession removing ID %d for session %d",
+                  value, mHwAvSyncIds.keyAt(i));
+            mHwAvSyncIds.removeItemsAt(i);
             break;
         }
     }
-    return AUDIO_HW_SYNC_INVALID;
+
+    mHwAvSyncIds.add(sessionId, value);
+
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+        sp<PlaybackThread> thread = mPlaybackThreads.valueAt(i);
+        uint32_t sessions = thread->hasAudioSession(sessionId);
+        if (sessions & PlaybackThread::TRACK_SESSION) {
+            AudioParameter param = AudioParameter();
+            param.addInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), value);
+            thread->setParameters(param.toString());
+            break;
+        }
+    }
+
+    ALOGV("getAudioHwSyncForSession adding ID %d for session %d", value, sessionId);
+    return (audio_hw_sync_t)value;
 }
 
+// setAudioHwSyncForSession_l() must be called with AudioFlinger::mLock held
+void AudioFlinger::setAudioHwSyncForSession_l(PlaybackThread *thread, audio_session_t sessionId)
+{
+    ssize_t index = mHwAvSyncIds.indexOfKey(sessionId);
+    if (index >= 0) {
+        audio_hw_sync_t syncId = mHwAvSyncIds.valueAt(index);
+        ALOGV("setAudioHwSyncForSession_l found ID %d for session %d", syncId, sessionId);
+        AudioParameter param = AudioParameter();
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), syncId);
+        thread->setParameters(param.toString());
+    }
+}
+
+
 // ----------------------------------------------------------------------------
 
 
@@ -1928,13 +1976,13 @@
     status_t status = inHwHal->open_input_stream(inHwHal, *input, device, &halconfig,
                                         &inStream, flags, address.string(), source);
     ALOGV("openInput_l() openInputStream returned input %p, SamplingRate %d"
-           ", Format %#x, Channels %x, flags %#x, status %d",
+           ", Format %#x, Channels %x, flags %#x, status %d addr %s",
             inStream,
             halconfig.sample_rate,
             halconfig.format,
             halconfig.channel_mask,
             flags,
-            status);
+            status, address.string());
 
     // If the input could not be opened with the requested parameters and we can handle the
     // conversion internally, try to open again with the proposed parameters. The AudioFlinger can
@@ -2585,7 +2633,8 @@
     // Check whether the destination thread has a channel count of FCC_2, which is
     // currently required for (most) effects. Prevent moving the effect chain here rather
     // than disabling the addEffect_l() call in dstThread below.
-    if (dstThread->mChannelCount != FCC_2) {
+    if ((dstThread->type() == ThreadBase::MIXER || dstThread->type() == ThreadBase::DUPLICATING) &&
+            dstThread->mChannelCount != FCC_2) {
         ALOGW("moveEffectChain_l() effect chain failed because"
                 " destination thread %p channel count(%u) != %u",
                 dstThread, dstThread->mChannelCount, FCC_2);
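
Editor's note on the AudioFlinger.cpp changes above: a HW A/V sync ID obtained from the HAL is now cached per session, with the invariant that a given ID belongs to at most one session; any previous owner is evicted before the new mapping is added. A generic sketch of that cache behavior follows, using std::map instead of the AudioFlinger/HAL interfaces; all names are illustrative:

    #include <cstdio>
    #include <map>

    // sessionId -> hwAvSyncId, with the invariant that each sync ID is owned
    // by at most one session at a time.
    class HwAvSyncCache {
    public:
        int getOrAssign(int sessionId, int idFromHal) {
            auto it = mIds.find(sessionId);
            if (it != mIds.end()) {
                return it->second;            // already cached for this session
            }
            // Evict any other session that currently owns this sync ID.
            for (auto e = mIds.begin(); e != mIds.end(); ++e) {
                if (e->second == idFromHal) {
                    mIds.erase(e);
                    break;
                }
            }
            mIds[sessionId] = idFromHal;
            return idFromHal;
        }
    private:
        std::map<int, int> mIds;
    };

    int main() {
        HwAvSyncCache cache;
        printf("%d\n", cache.getOrAssign(/*session*/ 1, /*id*/ 42)); // 42
        printf("%d\n", cache.getOrAssign(2, 42)); // 42, now owned by session 2
        printf("%d\n", cache.getOrAssign(2, 43)); // still 42 (cached for session 2)
        return 0;
    }
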
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 1003017..4fb372d 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -733,6 +733,8 @@
                 // Effect chains without a valid thread
                 DefaultKeyedVector< audio_session_t , sp<EffectChain> > mOrphanEffectChains;
 
+                // list of sessions for which a valid HW A/V sync ID was retrieved from the HAL
+                DefaultKeyedVector< audio_session_t , audio_hw_sync_t > mHwAvSyncIds;
 private:
     sp<Client>  registerPid(pid_t pid);    // always returns non-0
 
@@ -741,6 +743,7 @@
     void        closeOutputInternal_l(sp<PlaybackThread> thread);
     status_t    closeInput_nonvirtual(audio_io_handle_t input);
     void        closeInputInternal_l(sp<RecordThread> thread);
+    void        setAudioHwSyncForSession_l(PlaybackThread *thread, audio_session_t sessionId);
 
 #ifdef TEE_SINK
     // all record threads serially share a common tee sink, which is re-created on format change
diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp
index 8246fef..fae19a1 100644
--- a/services/audioflinger/ServiceUtilities.cpp
+++ b/services/audioflinger/ServiceUtilities.cpp
@@ -50,6 +50,13 @@
     return ok;
 }
 
+bool captureFmTunerAllowed() {
+    static const String16 sCaptureFmTunerAllowed("android.permission.ACCESS_FM_RADIO");
+    bool ok = checkCallingPermission(sCaptureFmTunerAllowed);
+    if (!ok) ALOGE("android.permission.ACCESS_FM_RADIO");
+    return ok;
+}
+
 bool settingsAllowed() {
     if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
     static const String16 sAudioSettings("android.permission.MODIFY_AUDIO_SETTINGS");
diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h
index df6f6f4..ce18a90 100644
--- a/services/audioflinger/ServiceUtilities.h
+++ b/services/audioflinger/ServiceUtilities.h
@@ -23,6 +23,7 @@
 bool recordingAllowed();
 bool captureAudioOutputAllowed();
 bool captureHotwordAllowed();
+bool captureFmTunerAllowed();
 bool settingsAllowed();
 bool modifyAudioRoutingAllowed();
 bool dumpAllowed();
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index e443476..71a6a73 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -4660,7 +4660,11 @@
     if (outputsReady(outputTracks)) {
         mAudioMixer->process(AudioBufferProvider::kInvalidPTS);
     } else {
-        memset(mSinkBuffer, 0, mSinkBufferSize);
+        if (mMixerBufferValid) {
+            memset(mMixerBuffer, 0, mMixerBufferSize);
+        } else {
+            memset(mSinkBuffer, 0, mSinkBufferSize);
+        }
     }
     sleepTime = 0;
     writeFrames = mNormalFrameCount;
diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/AudioPolicyEffects.cpp
index 3c1c042..e7e1b36 100644
--- a/services/audiopolicy/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/AudioPolicyEffects.cpp
@@ -105,26 +105,28 @@
     inputDesc->mRefCount++;
 
     ALOGV("addInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
-
-    Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
-    for (size_t i = 0; i < effects.size(); i++) {
-        EffectDesc *effect = effects[i];
-        sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input);
-        status_t status = fx->initCheck();
-        if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGW("addInputEffects(): failed to create Fx %s on source %d",
+    if (inputDesc->mRefCount == 1) {
+        Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
+        for (size_t i = 0; i < effects.size(); i++) {
+            EffectDesc *effect = effects[i];
+            sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0,
+                                                 audioSession, input);
+            status_t status = fx->initCheck();
+            if (status != NO_ERROR && status != ALREADY_EXISTS) {
+                ALOGW("addInputEffects(): failed to create Fx %s on source %d",
+                      effect->mName, (int32_t)aliasSource);
+                // fx goes out of scope and strong ref on AudioEffect is released
+                continue;
+            }
+            for (size_t j = 0; j < effect->mParams.size(); j++) {
+                fx->setParameter(effect->mParams[j]);
+            }
+            ALOGV("addInputEffects(): added Fx %s on source: %d",
                   effect->mName, (int32_t)aliasSource);
-            // fx goes out of scope and strong ref on AudioEffect is released
-            continue;
+            inputDesc->mEffects.add(fx);
         }
-        for (size_t j = 0; j < effect->mParams.size(); j++) {
-            fx->setParameter(effect->mParams[j]);
-        }
-        ALOGV("addInputEffects(): added Fx %s on source: %d", effect->mName, (int32_t)aliasSource);
-        inputDesc->mEffects.add(fx);
+        inputDesc->setProcessorEnabled(true);
     }
-    inputDesc->setProcessorEnabled(true);
-
     return status;
 }
 
@@ -241,26 +243,28 @@
     }
     procDesc->mRefCount++;
 
-    ALOGV("addOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
-
-    Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
-    for (size_t i = 0; i < effects.size(); i++) {
-        EffectDesc *effect = effects[i];
-        sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, 0, 0, 0, audioSession, output);
-        status_t status = fx->initCheck();
-        if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGE("addOutputSessionEffects(): failed to create Fx  %s on session %d",
-                  effect->mName, audioSession);
-            // fx goes out of scope and strong ref on AudioEffect is released
-            continue;
+    ALOGV("addOutputSessionEffects(): session: %d, refCount: %d",
+          audioSession, procDesc->mRefCount);
+    if (procDesc->mRefCount == 1) {
+        Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
+        for (size_t i = 0; i < effects.size(); i++) {
+            EffectDesc *effect = effects[i];
+            sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, 0, 0, 0,
+                                                 audioSession, output);
+            status_t status = fx->initCheck();
+            if (status != NO_ERROR && status != ALREADY_EXISTS) {
+                ALOGE("addOutputSessionEffects(): failed to create Fx  %s on session %d",
+                      effect->mName, audioSession);
+                // fx goes out of scope and strong ref on AudioEffect is released
+                continue;
+            }
+            ALOGV("addOutputSessionEffects(): added Fx %s on session: %d for stream: %d",
+                  effect->mName, audioSession, (int32_t)stream);
+            procDesc->mEffects.add(fx);
         }
-        ALOGV("addOutputSessionEffects(): added Fx %s on session: %d for stream: %d",
-              effect->mName, audioSession, (int32_t)stream);
-        procDesc->mEffects.add(fx);
+
+        procDesc->setProcessorEnabled(true);
     }
-
-    procDesc->setProcessorEnabled(true);
-
     return status;
 }
 
@@ -281,7 +285,8 @@
 
     EffectVector *procDesc = mOutputSessions.valueAt(index);
     procDesc->mRefCount--;
-    ALOGV("releaseOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
+    ALOGV("releaseOutputSessionEffects(): session: %d, refCount: %d",
+          audioSession, procDesc->mRefCount);
     if (procDesc->mRefCount == 0) {
         procDesc->setProcessorEnabled(false);
         procDesc->mEffects.clear();
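
Editor's note on the AudioPolicyEffects.cpp hunks above: the per-source and per-session effects are now created and enabled only when the reference count goes from 0 to 1, and torn down when it drops back to 0, so repeated attach calls no longer stack duplicate effect instances. A minimal sketch of that refcounted attach/release shape follows (generic C++ with placeholder effect creation, not the AudioEffect API):

    #include <cstdio>
    #include <string>
    #include <vector>

    class EffectVector {
    public:
        void addRef(const std::vector<std::string>& effectNames) {
            if (++mRefCount == 1) {
                // First user of this session: actually instantiate the effects.
                for (const auto& name : effectNames) {
                    printf("creating effect %s\n", name.c_str());
                    mEffects.push_back(name);
                }
                setProcessorEnabled(true);
            }
        }
        void release() {
            if (--mRefCount == 0) {
                setProcessorEnabled(false);
                mEffects.clear();
            }
        }
    private:
        void setProcessorEnabled(bool enabled) {
            printf("processor %s\n", enabled ? "enabled" : "disabled");
        }
        int mRefCount = 0;
        std::vector<std::string> mEffects;
    };

    int main() {
        EffectVector v;
        v.addRef({"agc", "ns"});  // creates effects, enables processing
        v.addRef({"agc", "ns"});  // second client: no duplicate effects
        v.release();
        v.release();              // last client gone: disabled and cleared
        return 0;
    }
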
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
index 6cd0ac8..c06ca72 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -129,8 +129,11 @@
                                     audio_output_flags_t flags,
                                     const audio_offload_info_t *offloadInfo)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
     if (mAudioPolicyManager == NULL) {
-        return 0;
+        return AUDIO_IO_HANDLE_NONE;
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
@@ -158,6 +161,9 @@
                                          audio_stream_type_t stream,
                                          int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
@@ -182,6 +188,9 @@
                                         audio_stream_type_t stream,
                                         int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
@@ -238,11 +247,13 @@
         return 0;
     }
     // already checked by client, but double-check in case the client wrapper is bypassed
-    if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD) {
+    if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD &&
+        inputSource != AUDIO_SOURCE_FM_TUNER) {
         return 0;
     }
 
-    if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
+    if (((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) ||
+        ((inputSource == AUDIO_SOURCE_FM_TUNER) && !captureFmTunerAllowed())) {
         return 0;
     }
     audio_io_handle_t input;
@@ -366,6 +377,9 @@
 
 uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return 0;
+    }
     if (mAudioPolicyManager == NULL) {
         return 0;
     }
@@ -376,8 +390,11 @@
 
 audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_DEVICE_NONE;
+    }
     if (mAudioPolicyManager == NULL) {
-        return (audio_devices_t)0;
+        return AUDIO_DEVICE_NONE;
     }
     return mAudioPolicyManager->getDevicesForStream(stream);
 }
@@ -422,8 +439,11 @@
 
 bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mAudioPolicyManager == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mAudioPolicyManager->isStreamActive(stream, inPastMs);
@@ -431,8 +451,11 @@
 
 bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mAudioPolicyManager == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mAudioPolicyManager->isStreamActiveRemotely(stream, inPastMs);
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
index e1e81e1..09476c1 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
@@ -134,8 +134,11 @@
                                     audio_output_flags_t flags,
                                     const audio_offload_info_t *offloadInfo)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
     if (mpAudioPolicy == NULL) {
-        return 0;
+        return AUDIO_IO_HANDLE_NONE;
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
@@ -147,6 +150,9 @@
                                          audio_stream_type_t stream,
                                          int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
@@ -172,6 +178,9 @@
                                         audio_stream_type_t stream,
                                         int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
@@ -228,11 +237,13 @@
         return 0;
     }
     // already checked by client, but double-check in case the client wrapper is bypassed
-    if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD) {
+    if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD &&
+        inputSource != AUDIO_SOURCE_FM_TUNER) {
         return 0;
     }
 
-    if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
+    if (((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) ||
+        ((inputSource == AUDIO_SOURCE_FM_TUNER) && !captureFmTunerAllowed())) {
         return 0;
     }
 
@@ -368,6 +379,9 @@
 
 uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return 0;
+    }
     if (mpAudioPolicy == NULL) {
         return 0;
     }
@@ -378,8 +392,11 @@
 
 audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_DEVICE_NONE;
+    }
     if (mpAudioPolicy == NULL) {
-        return (audio_devices_t)0;
+        return AUDIO_DEVICE_NONE;
     }
     return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
 }
@@ -424,8 +441,11 @@
 
 bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mpAudioPolicy == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
@@ -433,8 +453,11 @@
 
 bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mpAudioPolicy == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs);
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 536987a..584e170 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -26,7 +26,7 @@
 
 // A device mask for all audio input devices that are considered "virtual" when evaluating
 // active inputs in getActiveInput()
-#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL  AUDIO_DEVICE_IN_REMOTE_SUBMIX
+#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL  (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_FM_TUNER)
 // A device mask for all audio output devices that are considered "remote" when evaluating
 // active output devices in isStreamActiveRemotely()
 #define APM_AUDIO_OUT_DEVICE_REMOTE_ALL  AUDIO_DEVICE_OUT_REMOTE_SUBMIX
@@ -216,6 +216,10 @@
                                                   const char *device_address)
 {
     String8 address = (device_address == NULL) ? String8("") : String8(device_address);
+    // handle legacy remote submix case where the address was not always specified
+    if (deviceDistinguishesOnAddress(device) && (address.length() == 0)) {
+        address = String8("0");
+    }
 
     ALOGV("setDeviceConnectionState() device: %x, state %d, address %s",
             device, state, address.string());
@@ -419,6 +423,10 @@
     audio_policy_dev_state_t state = AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
     sp<DeviceDescriptor> devDesc = new DeviceDescriptor(String8(""), device);
     devDesc->mAddress = (device_address == NULL) ? String8("") : String8(device_address);
+    // handle legacy remote submix case where the address was not always specified
+    if (deviceDistinguishesOnAddress(device) && (devDesc->mAddress.length() == 0)) {
+        devDesc->mAddress = String8("0");
+    }
     ssize_t index;
     DeviceVector *deviceVector;
 
@@ -707,7 +715,7 @@
             config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
             config != AUDIO_POLICY_FORCE_ANALOG_DOCK &&
             config != AUDIO_POLICY_FORCE_DIGITAL_DOCK && config != AUDIO_POLICY_FORCE_NONE &&
-            config != AUDIO_POLICY_FORCE_NO_BT_A2DP) {
+            config != AUDIO_POLICY_FORCE_NO_BT_A2DP && config != AUDIO_POLICY_FORCE_SPEAKER) {
             ALOGW("setForceUse() invalid config %d for FOR_MEDIA", config);
             return;
         }
@@ -854,7 +862,7 @@
         flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
     }
 
-    ALOGV("getOutputForAttr() device %d, samplingRate %d, format %x, channelMask %x, flags %x",
+    ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
           device, samplingRate, format, channelMask, flags);
 
     audio_stream_type_t stream = streamTypefromAttributesInt(attr);
@@ -1119,6 +1127,20 @@
         return BAD_VALUE;
     }
 
+    // cannot start playback of STREAM_TTS if any other output is being used
+    uint32_t beaconMuteLatency = 0;
+    if (stream == AUDIO_STREAM_TTS) {
+        ALOGV("\t found BEACON stream");
+        if (isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
+            return INVALID_OPERATION;
+        } else {
+            beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
+        }
+    } else {
+        // some playback other than beacon starts
+        beaconMuteLatency = handleEventForBeacon(STARTING_OUTPUT);
+    }
+
     sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
 
     // increment usage count for this stream on the requested output:
@@ -1130,8 +1152,9 @@
         audio_devices_t newDevice = getNewOutputDevice(output, false /*fromCache*/);
         routing_strategy strategy = getStrategy(stream);
         bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
-                            (strategy == STRATEGY_SONIFICATION_RESPECTFUL);
-        uint32_t waitMs = 0;
+                            (strategy == STRATEGY_SONIFICATION_RESPECTFUL) ||
+                            (beaconMuteLatency > 0);
+        uint32_t waitMs = beaconMuteLatency;
         bool force = false;
         for (size_t i = 0; i < mOutputs.size(); i++) {
             sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
@@ -1145,7 +1168,8 @@
                     force = true;
                 }
                 // wait for audio on other active outputs to be presented when starting
-                // a notification so that audio focus effect can propagate.
+                // a notification so that the audio focus effect can propagate, or when a
+                // mute/unmute event occurred for the beacon stream
                 uint32_t latency = desc->latency();
                 if (shouldWait && desc->isActive(latency * 2) && (waitMs < latency)) {
                     waitMs = latency;
@@ -1189,6 +1213,9 @@
 
     sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
 
+    // always handle stream stop, check which stream type is stopping
+    handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
+
     // handle special case for sonification while in call
     if (isInCall()) {
         handleIncallSonification(stream, false, false);
@@ -1356,11 +1383,14 @@
     config.channel_mask = channelMask;
     config.format = format;
 
+    // handle legacy remote submix case where the address was not always specified
+    String8 address = deviceDistinguishesOnAddress(device) ? String8("0") : String8("");
+
     status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
                                                    &input,
                                                    &config,
                                                    &device,
-                                                   String8(""),
+                                                   address,
                                                    halInputSource,
                                                    flags);
 
@@ -1584,12 +1614,17 @@
     }
     mStreams[stream].mIndexCur.add(device, index);
 
-    // compute and apply stream volume on all outputs according to connected device
+    // update volume on all outputs whose current device is also selected by the same
+    // strategy as the device specified by the caller
+    audio_devices_t strategyDevice = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
+    if ((device != AUDIO_DEVICE_OUT_DEFAULT) && (device & strategyDevice) == 0) {
+        return NO_ERROR;
+    }
     status_t status = NO_ERROR;
     for (size_t i = 0; i < mOutputs.size(); i++) {
         audio_devices_t curDevice =
                 getDeviceForVolume(mOutputs.valueAt(i)->device());
-        if ((device == AUDIO_DEVICE_OUT_DEFAULT) || (device == curDevice)) {
+        if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & strategyDevice) != 0)) {
             status_t volStatus = checkAndSetVolume(stream, index, mOutputs.keyAt(i), curDevice);
             if (volStatus != NO_ERROR) {
                 status = volStatus;
@@ -2653,7 +2688,10 @@
     mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
     mA2dpSuspended(false),
     mSpeakerDrcEnabled(false), mNextUniqueId(1),
-    mAudioPortGeneration(1)
+    mAudioPortGeneration(1),
+    mBeaconMuteRefCount(0),
+    mBeaconPlayingRefCount(0),
+    mBeaconMuted(false)
 {
     mUidCached = getuid();
     mpClientInterface = clientInterface;
@@ -2787,6 +2825,14 @@
             inputDesc->mInputSource = AUDIO_SOURCE_MIC;
             inputDesc->mDevice = profileType;
 
+            // find the address
+            DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
+            //   the inputs vector must be of size 1, but we don't want to crash here
+            String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
+                    : String8("");
+            ALOGV("  for input device 0x%x using address %s", profileType, address.string());
+            ALOGE_IF(inputDevices.size() == 0, "Input device list is empty!");
+
             audio_config_t config = AUDIO_CONFIG_INITIALIZER;
             config.sample_rate = inputDesc->mSamplingRate;
             config.channel_mask = inputDesc->mChannelMask;
@@ -2796,7 +2842,7 @@
                                                            &input,
                                                            &config,
                                                            &inputDesc->mDevice,
-                                                           String8(""),
+                                                           address,
                                                            AUDIO_SOURCE_MIC,
                                                            AUDIO_INPUT_FLAG_NONE);
 
@@ -3816,6 +3862,8 @@
     //      use device for strategy media
     // 7: the strategy DTMF is active on the output:
     //      use device for strategy DTMF
+    // 8: the strategy for beacon, a.k.a. "transmitted through speaker" is active on the output:
+    //      use device for strategy "transmitted through speaker"
     if (outputDesc->isStrategyActive(STRATEGY_ENFORCED_AUDIBLE) &&
         mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
         device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
@@ -3832,6 +3880,8 @@
         device = getDeviceForStrategy(STRATEGY_MEDIA, fromCache);
     } else if (outputDesc->isStrategyActive(STRATEGY_DTMF)) {
         device = getDeviceForStrategy(STRATEGY_DTMF, fromCache);
+    } else if (outputDesc->isStrategyActive(STRATEGY_TRANSMITTED_THROUGH_SPEAKER)) {
+        device = getDeviceForStrategy(STRATEGY_TRANSMITTED_THROUGH_SPEAKER, fromCache);
     }
 
     ALOGV("getNewOutputDevice() selected device %x", device);
@@ -3910,16 +3960,20 @@
     case AUDIO_STREAM_SYSTEM:
         // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
         // while key clicks are played produces a poor result
-    case AUDIO_STREAM_TTS:
     case AUDIO_STREAM_MUSIC:
         return STRATEGY_MEDIA;
     case AUDIO_STREAM_ENFORCED_AUDIBLE:
         return STRATEGY_ENFORCED_AUDIBLE;
+    case AUDIO_STREAM_TTS:
+        return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
     }
 }
 
 uint32_t AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) {
     // flags to strategy mapping
+    if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
+        return (uint32_t) STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
+    }
     if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
         return (uint32_t) STRATEGY_ENFORCED_AUDIBLE;
     }
@@ -3967,6 +4021,74 @@
     }
 }
 
+bool AudioPolicyManager::isAnyOutputActive(audio_stream_type_t streamToIgnore) {
+    for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
+        if (s == (size_t) streamToIgnore) {
+            continue;
+        }
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            const sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
+            if (outputDesc->mRefCount[s] != 0) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+uint32_t AudioPolicyManager::handleEventForBeacon(int event) {
+    switch(event) {
+    case STARTING_OUTPUT:
+        mBeaconMuteRefCount++;
+        break;
+    case STOPPING_OUTPUT:
+        if (mBeaconMuteRefCount > 0) {
+            mBeaconMuteRefCount--;
+        }
+        break;
+    case STARTING_BEACON:
+        mBeaconPlayingRefCount++;
+        break;
+    case STOPPING_BEACON:
+        if (mBeaconPlayingRefCount > 0) {
+            mBeaconPlayingRefCount--;
+        }
+        break;
+    }
+
+    if (mBeaconMuteRefCount > 0) {
+        // any playback causes beacon to be muted
+        return setBeaconMute(true);
+    } else {
+        // no other playback: unmute when beacon starts playing, mute when it stops
+        return setBeaconMute(mBeaconPlayingRefCount == 0);
+    }
+}
+
+uint32_t AudioPolicyManager::setBeaconMute(bool mute) {
+    ALOGV("setBeaconMute(%d) mBeaconMuteRefCount=%d mBeaconPlayingRefCount=%d",
+            mute, mBeaconMuteRefCount, mBeaconPlayingRefCount);
+    // keep track of muted state to avoid repeating mute/unmute operations
+    if (mBeaconMuted != mute) {
+        // mute/unmute AUDIO_STREAM_TTS on all outputs
+        ALOGV("\t muting %d", mute);
+        uint32_t maxLatency = 0;
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            setStreamMute(AUDIO_STREAM_TTS, mute/*on*/,
+                    desc->mIoHandle,
+                    0 /*delay*/, AUDIO_DEVICE_NONE);
+            const uint32_t latency = desc->latency() * 2;
+            if (latency > maxLatency) {
+                maxLatency = latency;
+            }
+        }
+        mBeaconMuted = mute;
+        return maxLatency;
+    }
+    return 0;
+}
+
 audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
                                                              bool fromCache)
 {
@@ -3980,6 +4102,14 @@
     audio_devices_t availableOutputDeviceTypes = mAvailableOutputDevices.types();
     switch (strategy) {
 
+    case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
+        device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
+        if (!device) {
+            ALOGE("getDeviceForStrategy() no device found for "\
+                    "STRATEGY_TRANSMITTED_THROUGH_SPEAKER");
+        }
+        break;
+
     case STRATEGY_SONIFICATION_RESPECTFUL:
         if (isInCall()) {
             device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
@@ -4158,6 +4288,10 @@
                 device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
             }
         }
+        if ((device2 == AUDIO_DEVICE_NONE) &&
+            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) {
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
+        }
         if (device2 == AUDIO_DEVICE_NONE) {
             device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
         }
@@ -4248,6 +4382,7 @@
 
     for (size_t i = 0; i < NUM_STRATEGIES; i++) {
         audio_devices_t curDevice = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
+        curDevice = curDevice & outputDesc->mProfile->mSupportedDevices.types();
         bool mute = shouldMute && (curDevice & device) && (curDevice != device);
         bool doMute = false;
 
@@ -4348,11 +4483,15 @@
     muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevice, delayMs);
 
     // Do not change the routing if:
-    //  - the requested device is AUDIO_DEVICE_NONE
-    //  - the requested device is the same as current device and force is not specified.
+    //      the requested device is AUDIO_DEVICE_NONE
+    //      OR the requested device is the same as current device
+    //  AND force is not specified
+    //  AND the output is connected by a valid audio patch.
     // Doing this check here allows the caller to call setOutputDevice() without conditions
-    if ((device == AUDIO_DEVICE_NONE || device == prevDevice) && !force) {
-        ALOGV("setOutputDevice() setting same device %04x or null device for output %d", device, output);
+    if ((device == AUDIO_DEVICE_NONE || device == prevDevice) && !force &&
+            outputDesc->mPatchHandle != 0) {
+        ALOGV("setOutputDevice() setting same device %04x or null device for output %d",
+              device, output);
         return muteWaitMs;
     }
 
@@ -4667,6 +4806,11 @@
             device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
         }
         break;
+    case AUDIO_SOURCE_FM_TUNER:
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_FM_TUNER) {
+            device = AUDIO_DEVICE_IN_FM_TUNER;
+        }
+        break;
     default:
         ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
         break;
@@ -4891,6 +5035,16 @@
 };
 
 const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sLinearVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {0, -96.0f}, {33, -68.0f}, {66, -34.0f}, {100, 0.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sSilentVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {0, -96.0f}, {1, -96.0f}, {2, -96.0f}, {100, -96.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
             *AudioPolicyManager::sVolumeProfiles[AUDIO_STREAM_CNT]
                                                    [AudioPolicyManager::DEVICE_CATEGORY_CNT] = {
     { // AUDIO_STREAM_VOICE_CALL
@@ -4948,10 +5102,11 @@
         sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_TTS
-        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+      // "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
+        sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sLinearVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sSilentVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
 };
 
@@ -5843,12 +5998,28 @@
             }
         }
     }
+    for (size_t k = 0 ; k < port->mGains.size() ; k++) {
+        sp<AudioGain> gain = port->mGains.itemAt(k);
+        if (gain != 0) {
+            bool hasGain = false;
+            for (size_t l = 0 ; l < mGains.size() ; l++) {
+                if (gain == mGains.itemAt(l)) {
+                    hasGain = true;
+                    break;
+                }
+            }
+            if (!hasGain) { // never import a gain twice
+                mGains.add(gain);
+            }
+        }
+    }
 }
 
 void AudioPolicyManager::AudioPort::clearCapabilities() {
     mChannelMasks.clear();
     mFormats.clear();
     mSamplingRates.clear();
+    mGains.clear();
 }
 
 void AudioPolicyManager::AudioPort::loadSamplingRates(char *name)
@@ -6791,7 +6962,11 @@
                                  ARRAY_SIZE(sDeviceNameToEnumTable),
                                  devName);
             if (type != AUDIO_DEVICE_NONE) {
-                add(new DeviceDescriptor(String8(""), type));
+                sp<DeviceDescriptor> dev = new DeviceDescriptor(String8(""), type);
+                if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
+                    dev->mAddress = String8("0");
+                }
+                add(dev);
             } else {
                 sp<DeviceDescriptor> deviceDesc =
                         declaredDevices.getDeviceFromName(String8(devName));
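
The beacon handling added above (handleEventForBeacon()/setBeaconMute()) boils down to two reference counts and one rule. The struct below is a hypothetical standalone model that only mirrors the counting logic of the patch, without the per-output setStreamMute() calls:

    // Hypothetical standalone model of the STREAM_TTS ("beacon") arbitration.
    #include <stdint.h>

    struct BeaconArbiter {
        enum Event { STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON };

        uint32_t muteRefCount = 0;     // active non-beacon streams; any of them mutes the beacon
        uint32_t playingRefCount = 0;  // active beacon streams
        bool muted = false;

        // Returns the mute state that should now apply to AUDIO_STREAM_TTS.
        bool onEvent(Event e) {
            switch (e) {
            case STARTING_OUTPUT: muteRefCount++;                             break;
            case STOPPING_OUTPUT: if (muteRefCount > 0) muteRefCount--;       break;
            case STARTING_BEACON: playingRefCount++;                          break;
            case STOPPING_BEACON: if (playingRefCount > 0) playingRefCount--; break;
            }
            // Any other playback mutes the beacon; with no other playback, the beacon
            // is unmuted only while at least one beacon stream is playing.
            muted = (muteRefCount > 0) || (playingRefCount == 0);
            return muted;
        }
    };

In the real code, a change of mute state additionally applies setStreamMute(AUDIO_STREAM_TTS, ...) on every output and reports twice the largest output latency as the wait time.
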
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index 0ea7b97..50d7831 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -187,6 +187,7 @@
             STRATEGY_SONIFICATION_RESPECTFUL,
             STRATEGY_DTMF,
             STRATEGY_ENFORCED_AUDIBLE,
+            STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
             NUM_STRATEGIES
         };
 
@@ -434,6 +435,8 @@
         static const VolumeCurvePoint sHeadsetSystemVolumeCurve[AudioPolicyManager::VOLCNT];
         static const VolumeCurvePoint sDefaultVoiceVolumeCurve[AudioPolicyManager::VOLCNT];
         static const VolumeCurvePoint sSpeakerVoiceVolumeCurve[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sLinearVolumeCurve[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sSilentVolumeCurve[AudioPolicyManager::VOLCNT];
         // default volume curves per stream and device category. See initializeVolumeCurves()
         static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][DEVICE_CATEGORY_CNT];
 
@@ -600,8 +603,10 @@
                                     audio_io_handle_t output, audio_devices_t device);
 
         // check that volume change is permitted, compute and send new volume to audio hardware
-        status_t checkAndSetVolume(audio_stream_type_t stream, int index, audio_io_handle_t output,
-                                   audio_devices_t device, int delayMs = 0, bool force = false);
+        virtual status_t checkAndSetVolume(audio_stream_type_t stream, int index,
+                                           audio_io_handle_t output,
+                                           audio_devices_t device,
+                                           int delayMs = 0, bool force = false);
 
         // apply all stream volumes to the specified output and device
         void applyStreamVolumes(audio_io_handle_t output, audio_devices_t device, int delayMs = 0, bool force = false);
@@ -806,6 +811,18 @@
         sp<AudioPatch> mCallTxPatch;
         sp<AudioPatch> mCallRxPatch;
 
+        // for supporting "beacon" streams, i.e. streams that only play on speaker, and never
+        // when something other than STREAM_TTS (a.k.a. "Transmitted Through Speaker") is playing
+        enum {
+            STARTING_OUTPUT,
+            STARTING_BEACON,
+            STOPPING_OUTPUT,
+            STOPPING_BEACON
+        };
+        uint32_t mBeaconMuteRefCount;   // ref count for streams that would mute beacon
+        uint32_t mBeaconPlayingRefCount; // ref count for the playing beacon streams
+        bool mBeaconMuted;              // has STREAM_TTS been muted
+
 #ifdef AUDIO_POLICY_TEST
         Mutex   mLock;
         Condition mWaitWorkCV;
@@ -820,10 +837,9 @@
         uint32_t        mTestChannels;
         uint32_t        mTestLatencyMs;
 #endif //AUDIO_POLICY_TEST
-
-private:
         static float volIndexToAmpl(audio_devices_t device, const StreamDescriptor& streamDesc,
                 int indexInUi);
+private:
         // updates device caching and output for streams that can influence the
         //    routing of notifications
         void handleNotificationRoutingForStream(audio_stream_type_t stream);
@@ -851,6 +867,13 @@
                 const audio_offload_info_t *offloadInfo);
         // internal function to derive a stream type value from audio attributes
         audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr);
+        // return true if any output is playing anything besides the stream to ignore
+        bool isAnyOutputActive(audio_stream_type_t streamToIgnore);
+        // event is one of STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON
+        // returns 0 if no mute/unmute event happened, the largest latency of the device where
+        //   the mute/unmute happened
+        uint32_t handleEventForBeacon(int event);
+        uint32_t setBeaconMute(bool mute);
 };
 
 };
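
The new sLinearVolumeCurve and sSilentVolumeCurve declared above are each just four {index, dB} points. As a simplified illustration of how such a curve maps a volume index to attenuation, assuming plain piecewise-linear interpolation (the real conversion lives in volIndexToAmpl() and also rescales the index to the stream's configured min/max range):

    // Simplified illustration only, not the actual volIndexToAmpl() implementation.
    struct CurvePoint { int index; float dbAttenuation; };

    float curveToDb(const CurvePoint (&curve)[4], int volIndex) {
        if (volIndex <= curve[0].index) return curve[0].dbAttenuation;
        for (int seg = 1; seg < 4; seg++) {
            if (volIndex <= curve[seg].index) {
                const float span = float(curve[seg].index - curve[seg - 1].index);
                const float frac = float(volIndex - curve[seg - 1].index) / span;
                return curve[seg - 1].dbAttenuation +
                       frac * (curve[seg].dbAttenuation - curve[seg - 1].dbAttenuation);
            }
        }
        return curve[3].dbAttenuation;
    }

    // e.g. the linear curve {0,-96} {33,-68} {66,-34} {100,0} yields roughly -51 dB
    // at index 50, while the silent curve stays at -96 dB for every index.
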
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index dd2f64d..8e40534 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -420,12 +420,20 @@
 
     ALOGV("Camera %d: Waiting for threads", mCameraId);
 
-    mStreamingProcessor->join();
-    mFrameProcessor->join();
-    mCaptureSequencer->join();
-    mJpegProcessor->join();
-    mZslProcessorThread->join();
-    mCallbackProcessor->join();
+    {
+        // Don't wait with lock held, in case the other threads need to
+        // complete callbacks that re-enter Camera2Client
+        mBinderSerializationLock.unlock();
+
+        mStreamingProcessor->join();
+        mFrameProcessor->join();
+        mCaptureSequencer->join();
+        mJpegProcessor->join();
+        mZslProcessorThread->join();
+        mCallbackProcessor->join();
+
+        mBinderSerializationLock.lock();
+    }
 
     ALOGV("Camera %d: Deleting streams", mCameraId);
 
diff --git a/services/camera/libcameraservice/api1/client2/BurstCapture.cpp b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
index 0bfdfd4..5502dcb 100644
--- a/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
+++ b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
@@ -44,7 +44,7 @@
     return INVALID_OPERATION;
 }
 
-void BurstCapture::onFrameAvailable() {
+void BurstCapture::onFrameAvailable(const BufferItem &/*item*/) {
     ALOGV("%s", __FUNCTION__);
     Mutex::Autolock l(mInputMutex);
     if(!mInputChanged) {
diff --git a/services/camera/libcameraservice/api1/client2/BurstCapture.h b/services/camera/libcameraservice/api1/client2/BurstCapture.h
index ea321fd..c3b7722 100644
--- a/services/camera/libcameraservice/api1/client2/BurstCapture.h
+++ b/services/camera/libcameraservice/api1/client2/BurstCapture.h
@@ -39,7 +39,7 @@
     BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
     virtual ~BurstCapture();
 
-    virtual void onFrameAvailable();
+    virtual void onFrameAvailable(const BufferItem& item);
     virtual status_t start(Vector<CameraMetadata> &metadatas, int32_t firstCaptureId);
 
 protected:
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index bf3318e..eadaa00 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -46,7 +46,7 @@
     deleteStream();
 }
 
-void CallbackProcessor::onFrameAvailable() {
+void CallbackProcessor::onFrameAvailable(const BufferItem& /*item*/) {
     Mutex::Autolock l(mInputMutex);
     if (!mCallbackAvailable) {
         mCallbackAvailable = true;
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
index 613f5be..7fdc329 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
@@ -44,7 +44,7 @@
     CallbackProcessor(sp<Camera2Client> client);
     ~CallbackProcessor();
 
-    void onFrameAvailable();
+    void onFrameAvailable(const BufferItem& item);
 
     // Set to NULL to disable the direct-to-app callback window
     status_t setCallbackWindow(sp<ANativeWindow> callbackWindow);
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index b433781..2772267 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -51,7 +51,7 @@
     deleteStream();
 }
 
-void JpegProcessor::onFrameAvailable() {
+void JpegProcessor::onFrameAvailable(const BufferItem& /*item*/) {
     Mutex::Autolock l(mInputMutex);
     if (!mCaptureAvailable) {
         mCaptureAvailable = true;
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
index b2c05df..2040b30 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -47,7 +47,7 @@
     ~JpegProcessor();
 
     // CpuConsumer listener implementation
-    void onFrameAvailable();
+    void onFrameAvailable(const BufferItem& item);
 
     status_t updateStream(const Parameters &params);
     status_t deleteStream();
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 7b90d28..42a5507 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -2954,6 +2954,10 @@
             staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2);
     if (!sensorSize.count) return NO_INIT;
 
+    camera_metadata_ro_entry_t pixelArraySize =
+            staticInfo(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, 2, 2);
+    if (!pixelArraySize.count) return NO_INIT;
+
     float arrayAspect = static_cast<float>(fastInfo.arrayWidth) /
             fastInfo.arrayHeight;
     float stillAspect = static_cast<float>(pictureWidth) / pictureHeight;
@@ -3003,6 +3007,16 @@
         vertCropFactor = (arrayAspect < stillAspect) ?
                 (arrayAspect / stillAspect) : 1.f;
     }
+
+    /**
+     * Convert the crop factors w.r.t. the active array size to the crop factors
+     * w.r.t. the pixel array size.
+     */
+    horizCropFactor *= (static_cast<float>(fastInfo.arrayWidth) /
+                            pixelArraySize.data.i32[0]);
+    vertCropFactor *= (static_cast<float>(fastInfo.arrayHeight) /
+                            pixelArraySize.data.i32[1]);
+
     ALOGV("Horiz crop factor: %f, vert crop fact: %f",
             horizCropFactor, vertCropFactor);
     /**
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 9e7fff8..470624b 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -635,7 +635,7 @@
     return OK;
 }
 
-void StreamingProcessor::onFrameAvailable() {
+void StreamingProcessor::onFrameAvailable(const BufferItem& /*item*/) {
     ATRACE_CALL();
     Mutex::Autolock l(mMutex);
     if (!mRecordingFrameAvailable) {
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
index 8466af4..1d679a4 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
@@ -80,7 +80,7 @@
     status_t incrementStreamingIds();
 
     // Callback for new recording frames from HAL
-    virtual void onFrameAvailable();
+    virtual void onFrameAvailable(const BufferItem& item);
     // Callback from stagefright which returns used recording frames
     void releaseRecordingFrame(const sp<IMemory>& mem);
 
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 8f78103..8b7e4b4 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -66,7 +66,7 @@
     disconnect();
 }
 
-void ZslProcessor::onFrameAvailable() {
+void ZslProcessor::onFrameAvailable(const BufferItem& /*item*/) {
     Mutex::Autolock l(mInputMutex);
     if (!mZslBufferAvailable) {
         mZslBufferAvailable = true;
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
index b6533cf..2099c38 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -53,7 +53,7 @@
     ~ZslProcessor();
 
     // From mZslConsumer
-    virtual void onFrameAvailable();
+    virtual void onFrameAvailable(const BufferItem& item);
     // From FrameProcessor
     virtual void onResultAvailable(const CaptureResult &result);
 
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index f8562ec..d0f29de 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -268,7 +268,7 @@
     return OK;
 }
 
-void RingBufferConsumer::onFrameAvailable() {
+void RingBufferConsumer::onFrameAvailable(const android::BufferItem& item) {
     status_t err;
 
     {
@@ -321,7 +321,7 @@
         item.mGraphicBuffer = mSlots[item.mBuf].mGraphicBuffer;
     } // end of mMutex lock
 
-    ConsumerBase::onFrameAvailable();
+    ConsumerBase::onFrameAvailable(item);
 }
 
 void RingBufferConsumer::unpinBuffer(const BufferItem& item) {
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h
index da97a11..90fd734 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.h
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h
@@ -165,7 +165,7 @@
   private:
 
     // Override ConsumerBase::onFrameAvailable
-    virtual void onFrameAvailable();
+    virtual void onFrameAvailable(const android::BufferItem& item);
 
     void pinBufferLocked(const BufferItem& item);
     void unpinBuffer(const BufferItem& item);
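
The remaining hunks in this change are the same mechanical update: the frame-available callback now receives the BufferItem that was queued, so each listener override gains a const BufferItem& parameter (usually ignored). A minimal sketch of a listener written against the new signature, assuming the libgui ConsumerBase/BufferItem headers; the class name is illustrative:

    #include <gui/BufferItem.h>
    #include <gui/ConsumerBase.h>
    #include <utils/Condition.h>
    #include <utils/Mutex.h>

    // Most overrides in this patch, like this one, only need the wakeup and
    // ignore the BufferItem argument entirely.
    class FrameWaiter : public android::ConsumerBase::FrameAvailableListener {
    public:
        virtual void onFrameAvailable(const android::BufferItem& /*item*/) {
            android::Mutex::Autolock lock(mMutex);
            mFrameAvailable = true;
            mCondition.signal();
        }
    private:
        android::Mutex mMutex;
        android::Condition mCondition;
        bool mFrameAvailable = false;
    };
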