audio policy: validate stream type received from binder calls

automerge: ca83dd2
automerge: ba30b45
automerge: 10e898c

* commit '10e898cc583473d4c3604456b48e008f90d82f70':
  audio policy: validate stream type received from binder calls.

diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index 0dda6b6..dce313a 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -206,7 +206,7 @@
         return res;
     }
 
-    size_t sectionCount;
+    size_t sectionCount = 0;
     if (tagCount > 0) {
         if ((res = parcel->readInt32(reinterpret_cast<int32_t*>(&sectionCount))) != OK) {
             ALOGE("%s: could not read section count for.", __FUNCTION__);
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index cf18a45..c412299 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -208,8 +208,15 @@
 
     void        sendEvent(int msg, int ext1=0, int ext2=0,
                           const Parcel *obj=NULL) {
-        Mutex::Autolock autoLock(mNotifyLock);
-        if (mNotify) mNotify(mCookie, msg, ext1, ext2, obj);
+        notify_callback_f notifyCB;
+        void* cookie;
+        {
+            Mutex::Autolock autoLock(mNotifyLock);
+            notifyCB = mNotify;
+            cookie = mCookie;
+        }
+
+        if (notifyCB) notifyCB(cookie, msg, ext1, ext2, obj);
     }
 
     virtual status_t dump(int fd, const Vector<String16> &args) const {
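The sendEvent() change above snapshots the notify callback and cookie while mNotifyLock is held, then invokes the callback only after the lock is released, so a listener that re-enters the player (or replaces the callback) from inside its handler cannot deadlock, while still seeing a consistent callback/cookie pair. A minimal sketch of that copy-then-call pattern; Listener is a hypothetical class, not the real MediaPlayerBase interface:

    #include <cstdio>
    #include <functional>
    #include <mutex>

    class Listener {
    public:
        void setCallback(std::function<void(int)> cb) {
            std::lock_guard<std::mutex> lock(mLock);
            mCallback = std::move(cb);
        }

        void sendEvent(int msg) {
            std::function<void(int)> cb;
            {
                // Take a snapshot of the callback while holding the lock ...
                std::lock_guard<std::mutex> lock(mLock);
                cb = mCallback;
            }
            // ... and invoke it with the lock released, so the callback can
            // safely call back into setCallback() without deadlocking.
            if (cb) cb(msg);
        }

    private:
        std::mutex mLock;
        std::function<void(int)> mCallback;
    };

    int main() {
        Listener l;
        l.setCallback([](int msg) { printf("event %d\n", msg); });
        l.sendEvent(42);
        return 0;
    }
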
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 0a89fbb..97c8d84 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -278,7 +278,9 @@
     }
 
     // handle default values first.
-    if (streamType == AUDIO_STREAM_DEFAULT) {
+    // TODO once AudioPolicyManager fully supports audio_attributes_t,
+    //   remove stream "text-to-speech" redirect
+    if ((streamType == AUDIO_STREAM_DEFAULT) || (streamType == AUDIO_STREAM_TTS)) {
         streamType = AUDIO_STREAM_MUSIC;
     }
 
@@ -2124,6 +2126,12 @@
         mStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
         return;
     }
+    // TODO once AudioPolicyManager fully supports audio_attributes_t,
+    //   remove stream remap, the flag will be enough
+    if ((aa.flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
+        mStreamType = AUDIO_STREAM_TTS;
+        return;
+    }
 
     // usage to stream type mapping
     switch (aa.usage) {
@@ -2174,7 +2182,7 @@
 
 bool AudioTrack::isValidAttributes(const audio_attributes_t *paa) {
     // has flags that map to a strategy?
-    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO)) != 0) {
+    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO | AUDIO_FLAG_BEACON)) != 0) {
         return true;
     }
 
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index eec025e..561cb24 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -348,7 +348,13 @@
 
 void AudioTrackClientProxy::flush()
 {
-    mCblk->u.mStreaming.mFlush++;
+    // This works for mFrameCountP2 <= 2^30
+    size_t increment = mFrameCountP2 << 1;
+    size_t mask = increment - 1;
+    audio_track_cblk_t* cblk = mCblk;
+    int32_t newFlush = (cblk->u.mStreaming.mRear & mask) |
+                        ((cblk->u.mStreaming.mFlush & ~mask) + increment);
+    android_atomic_release_store(newFlush, &cblk->u.mStreaming.mFlush);
 }
 
 bool AudioTrackClientProxy::clearStreamEndDone() {
@@ -536,17 +542,27 @@
         rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
         front = cblk->u.mStreaming.mFront;
         if (flush != mFlush) {
-            mFlush = flush;
             // effectively obtain then release whatever is in the buffer
-            android_atomic_release_store(rear, &cblk->u.mStreaming.mFront);
-            if (front != rear) {
+            size_t mask = (mFrameCountP2 << 1) - 1;
+            int32_t newFront = (front & ~mask) | (flush & mask);
+            ssize_t filled = rear - newFront;
+            // Rather than shutting down on a corrupt flush, just treat it as a full flush
+            if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+                ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, filled %d=%#x",
+                        mFlush, flush, front, rear, mask, newFront, filled, filled);
+                newFront = rear;
+            }
+            mFlush = flush;
+            android_atomic_release_store(newFront, &cblk->u.mStreaming.mFront);
+            // There is no danger from a false positive, so err on the side of caution
+            if (true /*front != newFront*/) {
                 int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
                 if (!(old & CBLK_FUTEX_WAKE)) {
                     (void) syscall(__NR_futex, &cblk->mFutex,
                             mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
                 }
             }
-            front = rear;
+            front = newFront;
         }
     } else {
         front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
@@ -668,6 +684,7 @@
 
     int32_t flush = cblk->u.mStreaming.mFlush;
     if (flush != mFlush) {
+        // FIXME should return an accurate value, but over-estimate is better than under-estimate
         return mFrameCount;
     }
     // the acquire might not be necessary since not doing a subsequent read
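The new flush protocol above packs two things into the 32-bit mFlush word: the low bits carry the client's rear index modulo twice the power-of-two frame count, and the remaining high bits act as a generation counter bumped by `increment` on every flush, so consecutive flushes always produce a different value even if the rear index has not moved (hence the "mFrameCountP2 <= 2^30" comment). The server rebuilds the new front from its own front plus those low bits, and treats an implausible fill level as a full flush. A small standalone illustration of the packing, assuming a frame count of 256; the names mirror the patch but this is not the real cblk layout:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t frameCountP2 = 256;              // assumed power of two
        const uint32_t increment = frameCountP2 << 1;   // 512
        const uint32_t mask = increment - 1;            // low 9 bits

        uint32_t rear = 0x12345;    // client write index at flush time
        uint32_t flush = 0;         // previous flush word

        // Client side: pack the rear index (low bits) with a bumped
        // generation counter (high bits).
        uint32_t newFlush = (rear & mask) | ((flush & ~mask) + increment);

        // Server side: rebuild the new front index from its own front and
        // the low bits carried by the flush word.
        uint32_t front = 0x12000;
        uint32_t newFront = (front & ~mask) | (newFlush & mask);

        printf("newFlush=%#x newFront=%#x\n", newFlush, newFront);
        return 0;
    }
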
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index c146b8d..f91e3e4 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -52,6 +52,13 @@
 
 Visualizer::~Visualizer()
 {
+    ALOGV("Visualizer::~Visualizer()");
+    if (mCaptureThread != NULL) {
+        mCaptureThread->requestExitAndWait();
+        mCaptureThread.clear();
+    }
+    mCaptureCallBack = NULL;
+    mCaptureFlags = 0;
 }
 
 status_t Visualizer::setEnabled(bool enabled)
@@ -102,20 +109,18 @@
         return INVALID_OPERATION;
     }
 
-    sp<CaptureThread> t = mCaptureThread;
-    if (t != 0) {
-        t->mLock.lock();
+    if (mCaptureThread != 0) {
+        mCaptureLock.unlock();
+        mCaptureThread->requestExitAndWait();
+        mCaptureLock.lock();
     }
+
     mCaptureThread.clear();
     mCaptureCallBack = cbk;
     mCaptureCbkUser = user;
     mCaptureFlags = flags;
     mCaptureRate = rate;
 
-    if (t != 0) {
-        t->mLock.unlock();
-    }
-
     if (cbk != NULL) {
         mCaptureThread = new CaptureThread(*this, rate, ((flags & CAPTURE_CALL_JAVA) != 0));
     }
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 8eb1269..c120898 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -2159,7 +2159,6 @@
     {
     case MEDIA_ERROR:
         ALOGE("Error %d, %d occurred", ext1, ext2);
-        p->mError = ext1;
         break;
     case MEDIA_PREPARED:
         ALOGV("prepared");
@@ -2174,6 +2173,9 @@
 
     // wake up thread
     Mutex::Autolock lock(p->mLock);
+    if (msg == MEDIA_ERROR) {
+        p->mError = ext1;
+    }
     p->mCommandComplete = true;
     p->mSignal.signal();
 }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index a63a940..931451f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -306,10 +306,6 @@
     (new AMessage(kWhatPause, id()))->post();
 }
 
-void NuPlayer::resume() {
-    (new AMessage(kWhatResume, id()))->post();
-}
-
 void NuPlayer::resetAsync() {
     if (mSource != NULL) {
         // During a reset, the data source might be unresponsive already, we need to
@@ -574,69 +570,11 @@
         case kWhatStart:
         {
             ALOGV("kWhatStart");
-
-            mVideoIsAVC = false;
-            mOffloadAudio = false;
-            mAudioEOS = false;
-            mVideoEOS = false;
-            mSkipRenderingAudioUntilMediaTimeUs = -1;
-            mSkipRenderingVideoUntilMediaTimeUs = -1;
-            mNumFramesTotal = 0;
-            mNumFramesDropped = 0;
-            mStarted = true;
-
-            /* instantiate decoders now for secure playback */
-            if (mSourceFlags & Source::FLAG_SECURE) {
-                if (mNativeWindow != NULL) {
-                    instantiateDecoder(false, &mVideoDecoder);
-                }
-
-                if (mAudioSink != NULL) {
-                    instantiateDecoder(true, &mAudioDecoder);
-                }
+            if (mStarted) {
+                onResume();
+            } else {
+                onStart();
             }
-
-            mSource->start();
-
-            uint32_t flags = 0;
-
-            if (mSource->isRealTime()) {
-                flags |= Renderer::FLAG_REAL_TIME;
-            }
-
-            sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
-            audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
-            if (mAudioSink != NULL) {
-                streamType = mAudioSink->getAudioStreamType();
-            }
-
-            sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
-
-            mOffloadAudio =
-                canOffloadStream(audioMeta, (videoFormat != NULL),
-                                 true /* is_streaming */, streamType);
-            if (mOffloadAudio) {
-                flags |= Renderer::FLAG_OFFLOAD_AUDIO;
-            }
-
-            sp<AMessage> notify = new AMessage(kWhatRendererNotify, id());
-            ++mRendererGeneration;
-            notify->setInt32("generation", mRendererGeneration);
-            mRenderer = new Renderer(mAudioSink, notify, flags);
-
-            mRendererLooper = new ALooper;
-            mRendererLooper->setName("NuPlayerRenderer");
-            mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
-            mRendererLooper->registerHandler(mRenderer);
-
-            sp<MetaData> meta = getFileMeta();
-            int32_t rate;
-            if (meta != NULL
-                    && meta->findInt32(kKeyFrameRate, &rate) && rate > 0) {
-                mRenderer->setVideoFrameRate(rate);
-            }
-
-            postScanSources();
             break;
         }
 
@@ -979,26 +917,6 @@
             break;
         }
 
-        case kWhatResume:
-        {
-            if (mSource != NULL) {
-                mSource->resume();
-            } else {
-                ALOGW("resume called when source is gone or not set");
-            }
-            // |mAudioDecoder| may have been released due to the pause timeout, so re-create it if
-            // needed.
-            if (audioDecoderStillNeeded() && mAudioDecoder == NULL) {
-                instantiateDecoder(true /* audio */, &mAudioDecoder);
-            }
-            if (mRenderer != NULL) {
-                mRenderer->resume();
-            } else {
-                ALOGW("resume called when renderer is gone or not set");
-            }
-            break;
-        }
-
         case kWhatSourceNotify:
         {
             onSourceNotify(msg);
@@ -1017,6 +935,89 @@
     }
 }
 
+void NuPlayer::onResume() {
+    if (mSource != NULL) {
+        mSource->resume();
+    } else {
+        ALOGW("resume called when source is gone or not set");
+    }
+    // |mAudioDecoder| may have been released due to the pause timeout, so re-create it if
+    // needed.
+    if (audioDecoderStillNeeded() && mAudioDecoder == NULL) {
+        instantiateDecoder(true /* audio */, &mAudioDecoder);
+    }
+    if (mRenderer != NULL) {
+        mRenderer->resume();
+    } else {
+        ALOGW("resume called when renderer is gone or not set");
+    }
+}
+
+void NuPlayer::onStart() {
+    mVideoIsAVC = false;
+    mOffloadAudio = false;
+    mAudioEOS = false;
+    mVideoEOS = false;
+    mSkipRenderingAudioUntilMediaTimeUs = -1;
+    mSkipRenderingVideoUntilMediaTimeUs = -1;
+    mNumFramesTotal = 0;
+    mNumFramesDropped = 0;
+    mStarted = true;
+
+    /* instantiate decoders now for secure playback */
+    if (mSourceFlags & Source::FLAG_SECURE) {
+        if (mNativeWindow != NULL) {
+            instantiateDecoder(false, &mVideoDecoder);
+        }
+
+        if (mAudioSink != NULL) {
+            instantiateDecoder(true, &mAudioDecoder);
+        }
+    }
+
+    mSource->start();
+
+    uint32_t flags = 0;
+
+    if (mSource->isRealTime()) {
+        flags |= Renderer::FLAG_REAL_TIME;
+    }
+
+    sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+    audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
+    if (mAudioSink != NULL) {
+        streamType = mAudioSink->getAudioStreamType();
+    }
+
+    sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
+
+    mOffloadAudio =
+        canOffloadStream(audioMeta, (videoFormat != NULL),
+                         true /* is_streaming */, streamType);
+    if (mOffloadAudio) {
+        flags |= Renderer::FLAG_OFFLOAD_AUDIO;
+    }
+
+    sp<AMessage> notify = new AMessage(kWhatRendererNotify, id());
+    ++mRendererGeneration;
+    notify->setInt32("generation", mRendererGeneration);
+    mRenderer = new Renderer(mAudioSink, notify, flags);
+
+    mRendererLooper = new ALooper;
+    mRendererLooper->setName("NuPlayerRenderer");
+    mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+    mRendererLooper->registerHandler(mRenderer);
+
+    sp<MetaData> meta = getFileMeta();
+    int32_t rate;
+    if (meta != NULL
+            && meta->findInt32(kKeyFrameRate, &rate) && rate > 0) {
+        mRenderer->setVideoFrameRate(rate);
+    }
+
+    postScanSources();
+}
+
 bool NuPlayer::audioDecoderStillNeeded() {
     // Audio decoder is no longer needed if it's in shut/shutting down status.
     return ((mFlushingAudio != SHUT_DOWN) && (mFlushingAudio != SHUTTING_DOWN_DECODER));
@@ -1119,7 +1120,7 @@
     // Current code will just make that we select deep buffer
     // with video which should not be a problem as it should
     // not prevent from keeping A/V sync.
-    if (hasVideo &&
+    if (!hasVideo &&
             mSource->getDuration(&durationUs) == OK &&
             durationUs
                 > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index d6120d2..14056ca 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -54,7 +54,6 @@
     void start();
 
     void pause();
-    void resume();
 
     // Will notify the driver through "notifyResetComplete" once finished.
     void resetAsync();
@@ -213,6 +212,9 @@
     void handleFlushComplete(bool audio, bool isDecoder);
     void finishFlushIfPossible();
 
+    void onStart();
+    void onResume();
+
     bool audioDecoderStillNeeded();
 
     void flushDecoder(
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index ab46074..b9a1a6c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -239,16 +239,24 @@
             // fall through
         }
 
+        case STATE_PAUSED:
+        case STATE_STOPPED_AND_PREPARED:
+        {
+            if (mAtEOS && mStartupSeekTimeUs < 0) {
+                mStartupSeekTimeUs = 0;
+                mPositionUs = -1;
+            }
+
+            // fall through
+        }
+
         case STATE_PREPARED:
         {
             mAtEOS = false;
             mPlayer->start();
 
             if (mStartupSeekTimeUs >= 0) {
-                if (mStartupSeekTimeUs > 0) {
-                    mPlayer->seekToAsync(mStartupSeekTimeUs);
-                }
-
+                mPlayer->seekToAsync(mStartupSeekTimeUs);
                 mStartupSeekTimeUs = -1;
             }
             break;
@@ -264,20 +272,6 @@
             break;
         }
 
-        case STATE_PAUSED:
-        case STATE_STOPPED_AND_PREPARED:
-        {
-            if (mAtEOS) {
-                mPlayer->seekToAsync(0);
-                mAtEOS = false;
-                mPlayer->resume();
-                mPositionUs = -1;
-            } else {
-                mPlayer->resume();
-            }
-            break;
-        }
-
         default:
             return INVALID_OPERATION;
     }
@@ -348,6 +342,7 @@
 
     switch (mState) {
         case STATE_PREPARED:
+        case STATE_STOPPED_AND_PREPARED:
         {
             mStartupSeekTimeUs = seekTimeUs;
             // pretend that the seek completed. It will actually happen when starting playback.
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 86ce385..5d9001c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -38,7 +38,7 @@
 
 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
 // is closed to allow the audio DSP to power down.
-static const int64_t kOffloadPauseMaxUs = 60000000ll;
+static const int64_t kOffloadPauseMaxUs = 10000000ll;
 
 // static
 const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
@@ -1374,8 +1374,9 @@
                 return offloadingAudio();
             }
             ALOGV("openAudioSink: try to open AudioSink in offload mode");
-            flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
-            flags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+            uint32_t offloadFlags = flags;
+            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
             audioSinkChanged = true;
             mAudioSink->close();
             err = mAudioSink->open(
@@ -1386,7 +1387,7 @@
                     8 /* bufferCount */,
                     &NuPlayer::Renderer::AudioSinkCallback,
                     this,
-                    (audio_output_flags_t)flags,
+                    (audio_output_flags_t)offloadFlags,
                     &offloadInfo);
 
             if (err == OK) {
@@ -1410,9 +1411,9 @@
         }
     }
     if (!offloadOnly && !offloadingAudio()) {
-        flags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
         ALOGV("openAudioSink: open AudioSink in NON-offload mode");
-
+        uint32_t pcmFlags = flags;
+        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
         audioSinkChanged = true;
         mAudioSink->close();
         mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
@@ -1424,7 +1425,7 @@
                     8 /* bufferCount */,
                     NULL,
                     NULL,
-                    (audio_output_flags_t)flags),
+                    (audio_output_flags_t)pcmFlags),
                  (status_t)OK);
         mAudioSink->start();
     }
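The two openAudioSink hunks above stop modifying the caller-supplied flags in place: the offload attempt ORs AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD into a local offloadFlags, and the PCM path clears it from a local pcmFlags, so a failed offload open no longer leaks offload bits into the non-offload fallback. A minimal sketch of that local-copy pattern; the flag values and tryOpen() are placeholders, not the real AudioSink API:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t FLAG_COMPRESS_OFFLOAD = 1u << 0;
    static const uint32_t FLAG_DEEP_BUFFER      = 1u << 1;

    static bool tryOpen(uint32_t flags) {
        // Pretend the offload open always fails in this sketch.
        return (flags & FLAG_COMPRESS_OFFLOAD) == 0;
    }

    int main() {
        uint32_t flags = FLAG_DEEP_BUFFER;          // what the caller asked for

        uint32_t offloadFlags = flags;              // first attempt: offload
        offloadFlags |= FLAG_COMPRESS_OFFLOAD;
        offloadFlags &= ~FLAG_DEEP_BUFFER;

        if (!tryOpen(offloadFlags)) {
            uint32_t pcmFlags = flags;              // fallback keeps caller's flags intact
            pcmFlags &= ~FLAG_COMPRESS_OFFLOAD;
            printf("fell back with flags=%#x\n", pcmFlags);
        }
        return 0;
    }
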
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 2f2f9cf..0e9d734 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2221,7 +2221,11 @@
 
     video_def->xFramerate = (OMX_U32)(frameRate * 65536.0f);
     video_def->eCompressionFormat = OMX_VIDEO_CodingUnused;
-    video_def->eColorFormat = colorFormat;
+    // this is redundant as it was already set up in setVideoPortFormatType
+    // FIXME for now skip this only for flexible YUV formats
+    if (colorFormat != OMX_COLOR_FormatYUV420Flexible) {
+        video_def->eColorFormat = colorFormat;
+    }
 
     err = mOMX->setParameter(
             mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index ab8ac79..6a56729 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -75,7 +75,7 @@
 
 // maximum time in paused state when offloading audio decompression. When elapsed, the AudioPlayer
 // is destroyed to allow the audio DSP to power down.
-static int64_t kOffloadPauseMaxUs = 60000000ll;
+static int64_t kOffloadPauseMaxUs = 10000000ll;
 
 
 struct AwesomeEvent : public TimedEventQueue::Event {
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index f76aed6..c3a940a 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -130,6 +130,7 @@
          "CameraSource::getColorFormat", colorFormat);
 
     CHECK(!"Unknown color format");
+    return -1;
 }
 
 CameraSource *CameraSource::Create(const String16 &clientName) {
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 4b8440b..9f20b1d 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -41,8 +41,13 @@
 
 #include "include/ESDS.h"
 
+
+#ifndef __predict_false
+#define __predict_false(exp) __builtin_expect((exp) != 0, 0)
+#endif
+
 #define WARN_UNLESS(condition, message, ...) \
-( (CONDITION(condition)) ? false : ({ \
+( (__predict_false(condition)) ? false : ({ \
     ALOGW("Condition %s failed "  message, #condition, ##__VA_ARGS__); \
     true; \
 }))
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index a8806c8..288e07a 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -817,6 +817,7 @@
             CHECK(!"Should not be here. Unsupported color format.");
             break;
     }
+    return 0;
 }
 
 status_t OMXCodec::findTargetColorFormat(
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 40925fd..351ba1e 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -929,33 +929,22 @@
         }
 
         if (mEndOfInput) {
-            if (outputDelayRingBufferSamplesAvailable() > 0
-                    && outputDelayRingBufferSamplesAvailable()
-                            < mStreamInfo->frameSize * mStreamInfo->numChannels) {
-                ALOGE("not a complete frame of samples available");
-                mSignalledError = true;
-                notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
-                return;
-            }
-
-            if (mEndOfInput && !outQueue.empty() && outputDelayRingBufferSamplesAvailable() == 0) {
+            int ringBufAvail = outputDelayRingBufferSamplesAvailable();
+            if (!outQueue.empty()
+                    && ringBufAvail < mStreamInfo->frameSize * mStreamInfo->numChannels) {
                 if (!mEndOfOutput) {
-                    // send empty block signaling EOS
+                    // send partial or empty block signaling EOS
                     mEndOfOutput = true;
                     BufferInfo *outInfo = *outQueue.begin();
                     OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
 
-                    if (outHeader->nOffset != 0) {
-                        ALOGE("outHeader->nOffset != 0 is not handled");
-                        mSignalledError = true;
-                        notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
-                        return;
-                    }
-
                     INT_PCM *outBuffer = reinterpret_cast<INT_PCM *>(outHeader->pBuffer
                             + outHeader->nOffset);
-                    int32_t ns = 0;
-                    outHeader->nFilledLen = 0;
+                    int32_t ns = outputDelayRingBufferGetSamples(outBuffer, ringBufAvail);
+                    if (ns < 0) {
+                        ns = 0;
+                    }
+                    outHeader->nFilledLen = ns;
                     outHeader->nFlags = OMX_BUFFERFLAG_EOS;
 
                     outHeader->nTimeStamp = mBufferTimestamps.itemAt(0);
@@ -994,7 +983,7 @@
             }
             int32_t ns = outputDelayRingBufferGetSamples(0, avail);
             if (ns != avail) {
-                ALOGE("not a complete frame of samples available");
+                ALOGW("not a complete frame of samples available");
                 break;
             }
             mOutputBufferCount++;
diff --git a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
index ed3dca0..bb55871 100644
--- a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
+++ b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
@@ -575,9 +575,13 @@
                     &editPortInfo(0)->mDef;
                 portDef->format.video.nFrameWidth = mVideoWidth;
                 portDef->format.video.nFrameHeight = mVideoHeight;
+                portDef->format.video.nStride = portDef->format.video.nFrameWidth;
+                portDef->format.video.nSliceHeight = portDef->format.video.nFrameHeight;
                 portDef->format.video.xFramerate = def->format.video.xFramerate;
                 portDef->format.video.eColorFormat =
                     (OMX_COLOR_FORMATTYPE) mVideoColorFormat;
+                portDef->nBufferSize =
+                    (portDef->format.video.nStride * portDef->format.video.nSliceHeight * 3) / 2;
                 portDef = &editPortInfo(1)->mDef;
                 portDef->format.video.nFrameWidth = mVideoWidth;
                 portDef->format.video.nFrameHeight = mVideoHeight;
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index 1f4b6fd..e399984 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -382,5 +382,6 @@
     } else {
         CHECK(!"Unknown component");
     }
+    return NULL;
 }
 
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index c87d19c..400f320 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -462,9 +462,13 @@
                     &editPortInfo(0)->mDef;
                 portDef->format.video.nFrameWidth = mVideoWidth;
                 portDef->format.video.nFrameHeight = mVideoHeight;
+                portDef->format.video.nStride = portDef->format.video.nFrameWidth;
+                portDef->format.video.nSliceHeight = portDef->format.video.nFrameHeight;
                 portDef->format.video.xFramerate = def->format.video.xFramerate;
                 portDef->format.video.eColorFormat =
                     (OMX_COLOR_FORMATTYPE) mVideoColorFormat;
+                portDef->nBufferSize =
+                    (portDef->format.video.nStride * portDef->format.video.nSliceHeight * 3) / 2;
                 portDef = &editPortInfo(1)->mDef;
                 portDef->format.video.nFrameWidth = mVideoWidth;
                 portDef->format.video.nFrameHeight = mVideoHeight;
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 828577a..87d6961 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -189,4 +189,5 @@
     } else {
         CHECK(!"Unknown component");
     }
+    return NULL;
 }
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index eb621d5..0285feb 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -805,8 +805,12 @@
         OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
         def->format.video.nFrameWidth = mWidth;
         def->format.video.nFrameHeight = mHeight;
+        def->format.video.nStride = def->format.video.nFrameWidth;
+        def->format.video.nSliceHeight = def->format.video.nFrameHeight;
         def->format.video.xFramerate = mFramerate;
         def->format.video.eColorFormat = mColorFormat;
+        def->nBufferSize =
+            (def->format.video.nStride * def->format.video.nSliceHeight * 3) / 2;
         def = &editPortInfo(kOutputPortIndex)->mDef;
         def->format.video.nFrameWidth = mWidth;
         def->format.video.nFrameHeight = mHeight;
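The three encoder hunks above (SoftAVCEncoder, SoftMPEG4Encoder, SoftVPXEncoder) make nStride/nSliceHeight track the frame dimensions and size the input buffer as stride * sliceHeight * 3 / 2, which is the byte count of a YUV 4:2:0 frame: one full-resolution luma plane plus two quarter-resolution chroma planes. A worked example of that formula; the 1280x720 dimensions are just an example:

    #include <cstdio>

    int main() {
        const unsigned stride = 1280;       // nStride == nFrameWidth here
        const unsigned sliceHeight = 720;   // nSliceHeight == nFrameHeight here
        const unsigned lumaBytes = stride * sliceHeight;
        const unsigned chromaBytes = lumaBytes / 2;       // U + V, each 1/4 size
        const unsigned bufferSize = (stride * sliceHeight * 3) / 2;
        printf("luma=%u chroma=%u total=%u\n", lumaBytes, chromaBytes, bufferSize);
        return 0;
    }
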
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 6d8866a..eab7616 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -244,11 +244,16 @@
 status_t ATSParser::Program::parseProgramMap(ABitReader *br) {
     unsigned table_id = br->getBits(8);
     ALOGV("  table_id = %u", table_id);
-    CHECK_EQ(table_id, 0x02u);
-
+    if (table_id != 0x02u) {
+        ALOGE("PMT data error!");
+        return ERROR_MALFORMED;
+    }
     unsigned section_syntax_indicator = br->getBits(1);
     ALOGV("  section_syntax_indicator = %u", section_syntax_indicator);
-    CHECK_EQ(section_syntax_indicator, 1u);
+    if (section_syntax_indicator != 1u) {
+        ALOGE("PMT data error!");
+        return ERROR_MALFORMED;
+    }
 
     CHECK_EQ(br->getBits(1), 0u);
     MY_LOGV("  reserved = %u", br->getBits(2));
@@ -739,8 +744,10 @@
         if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
             CHECK_GE(optional_bytes_remaining, 5u);
 
-            CHECK_EQ(br->getBits(4), PTS_DTS_flags);
-
+            if (br->getBits(4) != PTS_DTS_flags) {
+                ALOGE("PES data Error!");
+                return ERROR_MALFORMED;
+            }
             PTS = ((uint64_t)br->getBits(3)) << 30;
             CHECK_EQ(br->getBits(1), 1u);
             PTS |= ((uint64_t)br->getBits(15)) << 15;
@@ -1003,8 +1010,10 @@
 void ATSParser::parseProgramAssociationTable(ABitReader *br) {
     unsigned table_id = br->getBits(8);
     ALOGV("  table_id = %u", table_id);
-    CHECK_EQ(table_id, 0x00u);
-
+    if (table_id != 0x00u) {
+        ALOGE("PAT data error!");
+        return ;
+    }
     unsigned section_syntax_indictor = br->getBits(1);
     ALOGV("  section_syntax_indictor = %u", section_syntax_indictor);
     CHECK_EQ(section_syntax_indictor, 1u);
@@ -1074,7 +1083,9 @@
         sp<PSISection> section = mPSISections.valueAt(sectionIndex);
 
         if (payload_unit_start_indicator) {
-            CHECK(section->isEmpty());
+            if (!section->isEmpty()) {
+                return ERROR_UNSUPPORTED;
+            }
 
             unsigned skip = br->getBits(8);
             br->skipBits(skip * 8);
@@ -1203,7 +1214,10 @@
     ALOGV("---");
 
     unsigned sync_byte = br->getBits(8);
-    CHECK_EQ(sync_byte, 0x47u);
+    if (sync_byte != 0x47u) {
+        ALOGE("[error] parseTS: return error as sync_byte=0x%x", sync_byte);
+        return BAD_VALUE;
+    }
 
     if (br->getBits(1)) {  // transport_error_indicator
         // silently ignore.
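The ATSParser hunks above downgrade several CHECK/CHECK_EQ assertions on fields taken directly from the transport stream (table_id, section_syntax_indicator, the PTS_DTS flag bits, the 0x47 sync byte) into error returns, so malformed or hostile input makes the parser report ERROR_MALFORMED/BAD_VALUE instead of aborting the media process. A minimal sketch of that validate-and-return approach, using a hypothetical packet-header check rather than the real ABitReader API:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    enum Status { OK_STATUS = 0, ERROR_MALFORMED = -1 };

    static Status parsePacketHeader(const uint8_t* pkt, size_t size) {
        if (size < 4) {
            return ERROR_MALFORMED;
        }
        // 0x47 is the MPEG-TS sync byte; reject anything else instead of CHECKing.
        if (pkt[0] != 0x47) {
            fprintf(stderr, "bad sync byte 0x%02x\n", pkt[0]);
            return ERROR_MALFORMED;
        }
        return OK_STATUS;
    }

    int main() {
        const uint8_t good[4] = { 0x47, 0x40, 0x00, 0x10 };
        const uint8_t bad[4]  = { 0x00, 0x40, 0x00, 0x10 };
        printf("good=%d bad=%d\n", parsePacketHeader(good, 4), parsePacketHeader(bad, 4));
        return 0;
    }
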
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index e48af20..7b65de7 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -650,6 +650,7 @@
             }
         }
 
+        setAudioHwSyncForSession_l(thread, (audio_session_t)lSessionId);
     }
 
     if (lStatus != NO_ERROR) {
@@ -1604,22 +1605,69 @@
 audio_hw_sync_t AudioFlinger::getAudioHwSyncForSession(audio_session_t sessionId)
 {
     Mutex::Autolock _l(mLock);
-    for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
-        sp<PlaybackThread> thread = mPlaybackThreads.valueAt(i);
-        if ((thread->hasAudioSession(sessionId) & ThreadBase::TRACK_SESSION) != 0) {
-            // A session can only be on one thread, so exit after first match
-            String8 reply = thread->getParameters(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC));
-            AudioParameter param = AudioParameter(reply);
-            int value;
-            if (param.getInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), value) == NO_ERROR) {
-                return value;
-            }
+
+    ssize_t index = mHwAvSyncIds.indexOfKey(sessionId);
+    if (index >= 0) {
+        ALOGV("getAudioHwSyncForSession found ID %d for session %d",
+              mHwAvSyncIds.valueAt(index), sessionId);
+        return mHwAvSyncIds.valueAt(index);
+    }
+
+    audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+    if (dev == NULL) {
+        return AUDIO_HW_SYNC_INVALID;
+    }
+    char *reply = dev->get_parameters(dev, AUDIO_PARAMETER_HW_AV_SYNC);
+    AudioParameter param = AudioParameter(String8(reply));
+    free(reply);
+
+    int value;
+    if (param.getInt(String8(AUDIO_PARAMETER_HW_AV_SYNC), value) != NO_ERROR) {
+        ALOGW("getAudioHwSyncForSession error getting sync for session %d", sessionId);
+        return AUDIO_HW_SYNC_INVALID;
+    }
+
+    // allow only one session for a given HW A/V sync ID.
+    for (size_t i = 0; i < mHwAvSyncIds.size(); i++) {
+        if (mHwAvSyncIds.valueAt(i) == (audio_hw_sync_t)value) {
+            ALOGV("getAudioHwSyncForSession removing ID %d for session %d",
+                  value, mHwAvSyncIds.keyAt(i));
+            mHwAvSyncIds.removeItemsAt(i);
             break;
         }
     }
-    return AUDIO_HW_SYNC_INVALID;
+
+    mHwAvSyncIds.add(sessionId, value);
+
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+        sp<PlaybackThread> thread = mPlaybackThreads.valueAt(i);
+        uint32_t sessions = thread->hasAudioSession(sessionId);
+        if (sessions & PlaybackThread::TRACK_SESSION) {
+            AudioParameter param = AudioParameter();
+            param.addInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), value);
+            thread->setParameters(param.toString());
+            break;
+        }
+    }
+
+    ALOGV("getAudioHwSyncForSession adding ID %d for session %d", value, sessionId);
+    return (audio_hw_sync_t)value;
 }
 
+// setAudioHwSyncForSession_l() must be called with AudioFlinger::mLock held
+void AudioFlinger::setAudioHwSyncForSession_l(PlaybackThread *thread, audio_session_t sessionId)
+{
+    ssize_t index = mHwAvSyncIds.indexOfKey(sessionId);
+    if (index >= 0) {
+        audio_hw_sync_t syncId = mHwAvSyncIds.valueAt(index);
+        ALOGV("setAudioHwSyncForSession_l found ID %d for session %d", syncId, sessionId);
+        AudioParameter param = AudioParameter();
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), syncId);
+        thread->setParameters(param.toString());
+    }
+}
+
+
 // ----------------------------------------------------------------------------
 
 
@@ -1928,13 +1976,13 @@
     status_t status = inHwHal->open_input_stream(inHwHal, *input, device, &halconfig,
                                         &inStream, flags, address.string(), source);
     ALOGV("openInput_l() openInputStream returned input %p, SamplingRate %d"
-           ", Format %#x, Channels %x, flags %#x, status %d",
+           ", Format %#x, Channels %x, flags %#x, status %d addr %s",
             inStream,
             halconfig.sample_rate,
             halconfig.format,
             halconfig.channel_mask,
             flags,
-            status);
+            status, address.string());
 
     // If the input could not be opened with the requested parameters and we can handle the
     // conversion internally, try to open again with the proposed parameters. The AudioFlinger can
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 1003017..4fb372d 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -733,6 +733,8 @@
                 // Effect chains without a valid thread
                 DefaultKeyedVector< audio_session_t , sp<EffectChain> > mOrphanEffectChains;
 
+                // list of sessions for which a valid HW A/V sync ID was retrieved from the HAL
+                DefaultKeyedVector< audio_session_t , audio_hw_sync_t >mHwAvSyncIds;
 private:
     sp<Client>  registerPid(pid_t pid);    // always returns non-0
 
@@ -741,6 +743,7 @@
     void        closeOutputInternal_l(sp<PlaybackThread> thread);
     status_t    closeInput_nonvirtual(audio_io_handle_t input);
     void        closeInputInternal_l(sp<RecordThread> thread);
+    void        setAudioHwSyncForSession_l(PlaybackThread *thread, audio_session_t sessionId);
 
 #ifdef TEE_SINK
     // all record threads serially share a common tee sink, which is re-created on format change
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
index dceda97..c06ca72 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -129,8 +129,11 @@
                                     audio_output_flags_t flags,
                                     const audio_offload_info_t *offloadInfo)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
     if (mAudioPolicyManager == NULL) {
-        return 0;
+        return AUDIO_IO_HANDLE_NONE;
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
@@ -158,6 +161,9 @@
                                          audio_stream_type_t stream,
                                          int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
@@ -182,6 +188,9 @@
                                         audio_stream_type_t stream,
                                         int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
@@ -368,6 +377,9 @@
 
 uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return 0;
+    }
     if (mAudioPolicyManager == NULL) {
         return 0;
     }
@@ -378,8 +390,11 @@
 
 audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_DEVICE_NONE;
+    }
     if (mAudioPolicyManager == NULL) {
-        return (audio_devices_t)0;
+        return AUDIO_DEVICE_NONE;
     }
     return mAudioPolicyManager->getDevicesForStream(stream);
 }
@@ -424,8 +439,11 @@
 
 bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mAudioPolicyManager == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mAudioPolicyManager->isStreamActive(stream, inPastMs);
@@ -433,8 +451,11 @@
 
 bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mAudioPolicyManager == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mAudioPolicyManager->isStreamActiveRemotely(stream, inPastMs);
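These are the changes named in the commit subject: every AudioPolicyService entry point that accepts an audio_stream_type_t from a binder caller (getOutput, startOutput, stopOutput, getStrategyForStream, getDevicesForStream, isStreamActive, isStreamActiveRemotely) now rejects values at or above AUDIO_STREAM_CNT before they can index per-stream state, returning a harmless default instead; the legacy implementation below gets the same guard. A minimal sketch of the check, with a placeholder constant standing in for AUDIO_STREAM_CNT:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t kStreamCnt = 10;   // stands in for AUDIO_STREAM_CNT

    static bool isValidStream(int32_t stream) {
        // A stream type arriving over binder is untrusted: reject anything
        // outside [0, kStreamCnt) before it is used to index per-stream tables.
        return uint32_t(stream) < kStreamCnt;
    }

    int main() {
        printf("%d %d %d\n", isValidStream(3), isValidStream(-1), isValidStream(42));
        return 0;
    }
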
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
index ac41968..09476c1 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
@@ -134,8 +134,11 @@
                                     audio_output_flags_t flags,
                                     const audio_offload_info_t *offloadInfo)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
     if (mpAudioPolicy == NULL) {
-        return 0;
+        return AUDIO_IO_HANDLE_NONE;
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
@@ -147,6 +150,9 @@
                                          audio_stream_type_t stream,
                                          int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
@@ -172,6 +178,9 @@
                                         audio_stream_type_t stream,
                                         int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
@@ -370,6 +379,9 @@
 
 uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return 0;
+    }
     if (mpAudioPolicy == NULL) {
         return 0;
     }
@@ -380,8 +392,11 @@
 
 audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_DEVICE_NONE;
+    }
     if (mpAudioPolicy == NULL) {
-        return (audio_devices_t)0;
+        return AUDIO_DEVICE_NONE;
     }
     return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
 }
@@ -426,8 +441,11 @@
 
 bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mpAudioPolicy == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
@@ -435,8 +453,11 @@
 
 bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mpAudioPolicy == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs);
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index c437551..584e170 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -216,6 +216,10 @@
                                                   const char *device_address)
 {
     String8 address = (device_address == NULL) ? String8("") : String8(device_address);
+    // handle legacy remote submix case where the address was not always specified
+    if (deviceDistinguishesOnAddress(device) && (address.length() == 0)) {
+        address = String8("0");
+    }
 
     ALOGV("setDeviceConnectionState() device: %x, state %d, address %s",
             device, state, address.string());
@@ -419,6 +423,10 @@
     audio_policy_dev_state_t state = AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
     sp<DeviceDescriptor> devDesc = new DeviceDescriptor(String8(""), device);
     devDesc->mAddress = (device_address == NULL) ? String8("") : String8(device_address);
+    // handle legacy remote submix case where the address was not always specified
+    if (deviceDistinguishesOnAddress(device) && (devDesc->mAddress.length() == 0)) {
+        devDesc->mAddress = String8("0");
+    }
     ssize_t index;
     DeviceVector *deviceVector;
 
@@ -854,7 +862,7 @@
         flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
     }
 
-    ALOGV("getOutputForAttr() device %d, samplingRate %d, format %x, channelMask %x, flags %x",
+    ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
           device, samplingRate, format, channelMask, flags);
 
     audio_stream_type_t stream = streamTypefromAttributesInt(attr);
@@ -1119,6 +1127,20 @@
         return BAD_VALUE;
     }
 
+    // cannot start playback of STREAM_TTS if any other output is being used
+    uint32_t beaconMuteLatency = 0;
+    if (stream == AUDIO_STREAM_TTS) {
+        ALOGV("\t found BEACON stream");
+        if (isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
+            return INVALID_OPERATION;
+        } else {
+            beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
+        }
+    } else {
+        // some playback other than beacon starts
+        beaconMuteLatency = handleEventForBeacon(STARTING_OUTPUT);
+    }
+
     sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
 
     // increment usage count for this stream on the requested output:
@@ -1130,8 +1152,9 @@
         audio_devices_t newDevice = getNewOutputDevice(output, false /*fromCache*/);
         routing_strategy strategy = getStrategy(stream);
         bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
-                            (strategy == STRATEGY_SONIFICATION_RESPECTFUL);
-        uint32_t waitMs = 0;
+                            (strategy == STRATEGY_SONIFICATION_RESPECTFUL) ||
+                            (beaconMuteLatency > 0);
+        uint32_t waitMs = beaconMuteLatency;
         bool force = false;
         for (size_t i = 0; i < mOutputs.size(); i++) {
             sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
@@ -1145,7 +1168,8 @@
                     force = true;
                 }
                 // wait for audio on other active outputs to be presented when starting
-                // a notification so that audio focus effect can propagate.
+                // a notification so that audio focus effect can propagate, or that a mute/unmute
+                // event occurred for beacon
                 uint32_t latency = desc->latency();
                 if (shouldWait && desc->isActive(latency * 2) && (waitMs < latency)) {
                     waitMs = latency;
@@ -1189,6 +1213,9 @@
 
     sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
 
+    // always handle stream stop, check which stream type is stopping
+    handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
+
     // handle special case for sonification while in call
     if (isInCall()) {
         handleIncallSonification(stream, false, false);
@@ -1356,11 +1383,14 @@
     config.channel_mask = channelMask;
     config.format = format;
 
+    // handle legacy remote submix case where the address was not always specified
+    String8 address = deviceDistinguishesOnAddress(device) ? String8("0") : String8("");
+
     status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
                                                    &input,
                                                    &config,
                                                    &device,
-                                                   String8(""),
+                                                   address,
                                                    halInputSource,
                                                    flags);
 
@@ -1584,12 +1614,17 @@
     }
     mStreams[stream].mIndexCur.add(device, index);
 
-    // compute and apply stream volume on all outputs according to connected device
+    // update volume on all outputs whose current device is also selected by the same
+    // strategy as the device specified by the caller
+    audio_devices_t strategyDevice = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
+    if ((device != AUDIO_DEVICE_OUT_DEFAULT) && (device & strategyDevice) == 0) {
+        return NO_ERROR;
+    }
     status_t status = NO_ERROR;
     for (size_t i = 0; i < mOutputs.size(); i++) {
         audio_devices_t curDevice =
                 getDeviceForVolume(mOutputs.valueAt(i)->device());
-        if ((device == AUDIO_DEVICE_OUT_DEFAULT) || (device == curDevice)) {
+        if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & strategyDevice) != 0)) {
             status_t volStatus = checkAndSetVolume(stream, index, mOutputs.keyAt(i), curDevice);
             if (volStatus != NO_ERROR) {
                 status = volStatus;
@@ -2653,7 +2688,10 @@
     mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
     mA2dpSuspended(false),
     mSpeakerDrcEnabled(false), mNextUniqueId(1),
-    mAudioPortGeneration(1)
+    mAudioPortGeneration(1),
+    mBeaconMuteRefCount(0),
+    mBeaconPlayingRefCount(0),
+    mBeaconMuted(false)
 {
     mUidCached = getuid();
     mpClientInterface = clientInterface;
@@ -2787,6 +2825,14 @@
             inputDesc->mInputSource = AUDIO_SOURCE_MIC;
             inputDesc->mDevice = profileType;
 
+            // find the address
+            DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
+            //   the inputs vector must be of size 1, but we don't want to crash here
+            String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
+                    : String8("");
+            ALOGV("  for input device 0x%x using address %s", profileType, address.string());
+            ALOGE_IF(inputDevices.size() == 0, "Input device list is empty!");
+
             audio_config_t config = AUDIO_CONFIG_INITIALIZER;
             config.sample_rate = inputDesc->mSamplingRate;
             config.channel_mask = inputDesc->mChannelMask;
@@ -2796,7 +2842,7 @@
                                                            &input,
                                                            &config,
                                                            &inputDesc->mDevice,
-                                                           String8(""),
+                                                           address,
                                                            AUDIO_SOURCE_MIC,
                                                            AUDIO_INPUT_FLAG_NONE);
 
@@ -3816,6 +3862,8 @@
     //      use device for strategy media
     // 7: the strategy DTMF is active on the output:
     //      use device for strategy DTMF
+    // 8: the strategy for beacon, a.k.a. "transmitted through speaker" is active on the output:
+    //      use device for strategy t-t-s
     if (outputDesc->isStrategyActive(STRATEGY_ENFORCED_AUDIBLE) &&
         mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
         device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
@@ -3832,6 +3880,8 @@
         device = getDeviceForStrategy(STRATEGY_MEDIA, fromCache);
     } else if (outputDesc->isStrategyActive(STRATEGY_DTMF)) {
         device = getDeviceForStrategy(STRATEGY_DTMF, fromCache);
+    } else if (outputDesc->isStrategyActive(STRATEGY_TRANSMITTED_THROUGH_SPEAKER)) {
+        device = getDeviceForStrategy(STRATEGY_TRANSMITTED_THROUGH_SPEAKER, fromCache);
     }
 
     ALOGV("getNewOutputDevice() selected device %x", device);
@@ -3910,16 +3960,20 @@
     case AUDIO_STREAM_SYSTEM:
         // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
         // while key clicks are played produces a poor result
-    case AUDIO_STREAM_TTS:
     case AUDIO_STREAM_MUSIC:
         return STRATEGY_MEDIA;
     case AUDIO_STREAM_ENFORCED_AUDIBLE:
         return STRATEGY_ENFORCED_AUDIBLE;
+    case AUDIO_STREAM_TTS:
+        return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
     }
 }
 
 uint32_t AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) {
     // flags to strategy mapping
+    if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
+        return (uint32_t) STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
+    }
     if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
         return (uint32_t) STRATEGY_ENFORCED_AUDIBLE;
     }
@@ -3967,6 +4021,74 @@
     }
 }
 
+bool AudioPolicyManager::isAnyOutputActive(audio_stream_type_t streamToIgnore) {
+    for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
+        if (s == (size_t) streamToIgnore) {
+            continue;
+        }
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            const sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
+            if (outputDesc->mRefCount[s] != 0) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+uint32_t AudioPolicyManager::handleEventForBeacon(int event) {
+    switch(event) {
+    case STARTING_OUTPUT:
+        mBeaconMuteRefCount++;
+        break;
+    case STOPPING_OUTPUT:
+        if (mBeaconMuteRefCount > 0) {
+            mBeaconMuteRefCount--;
+        }
+        break;
+    case STARTING_BEACON:
+        mBeaconPlayingRefCount++;
+        break;
+    case STOPPING_BEACON:
+        if (mBeaconPlayingRefCount > 0) {
+            mBeaconPlayingRefCount--;
+        }
+        break;
+    }
+
+    if (mBeaconMuteRefCount > 0) {
+        // any playback causes beacon to be muted
+        return setBeaconMute(true);
+    } else {
+        // no other playback: unmute when beacon starts playing, mute when it stops
+        return setBeaconMute(mBeaconPlayingRefCount == 0);
+    }
+}
+
+uint32_t AudioPolicyManager::setBeaconMute(bool mute) {
+    ALOGV("setBeaconMute(%d) mBeaconMuteRefCount=%d mBeaconPlayingRefCount=%d",
+            mute, mBeaconMuteRefCount, mBeaconPlayingRefCount);
+    // keep track of muted state to avoid repeating mute/unmute operations
+    if (mBeaconMuted != mute) {
+        // mute/unmute AUDIO_STREAM_TTS on all outputs
+        ALOGV("\t muting %d", mute);
+        uint32_t maxLatency = 0;
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            setStreamMute(AUDIO_STREAM_TTS, mute/*on*/,
+                    desc->mIoHandle,
+                    0 /*delay*/, AUDIO_DEVICE_NONE);
+            const uint32_t latency = desc->latency() * 2;
+            if (latency > maxLatency) {
+                maxLatency = latency;
+            }
+        }
+        mBeaconMuted = mute;
+        return maxLatency;
+    }
+    return 0;
+}
+
 audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
                                                              bool fromCache)
 {
@@ -3980,6 +4102,14 @@
     audio_devices_t availableOutputDeviceTypes = mAvailableOutputDevices.types();
     switch (strategy) {
 
+    case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
+        device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
+        if (!device) {
+            ALOGE("getDeviceForStrategy() no device found for "\
+                    "STRATEGY_TRANSMITTED_THROUGH_SPEAKER");
+        }
+        break;
+
     case STRATEGY_SONIFICATION_RESPECTFUL:
         if (isInCall()) {
             device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
@@ -4252,6 +4382,7 @@
 
     for (size_t i = 0; i < NUM_STRATEGIES; i++) {
         audio_devices_t curDevice = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
+        curDevice = curDevice & outputDesc->mProfile->mSupportedDevices.types();
         bool mute = shouldMute && (curDevice & device) && (curDevice != device);
         bool doMute = false;
 
@@ -4352,11 +4483,15 @@
     muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevice, delayMs);
 
     // Do not change the routing if:
-    //  - the requested device is AUDIO_DEVICE_NONE
-    //  - the requested device is the same as current device and force is not specified.
+    //      the requested device is AUDIO_DEVICE_NONE
+    //      OR the requested device is the same as current device
+    //  AND force is not specified
+    //  AND the output is connected by a valid audio patch.
     // Doing this check here allows the caller to call setOutputDevice() without conditions
-    if ((device == AUDIO_DEVICE_NONE || device == prevDevice) && !force) {
-        ALOGV("setOutputDevice() setting same device %04x or null device for output %d", device, output);
+    if ((device == AUDIO_DEVICE_NONE || device == prevDevice) && !force &&
+            outputDesc->mPatchHandle != 0) {
+        ALOGV("setOutputDevice() setting same device %04x or null device for output %d",
+              device, output);
         return muteWaitMs;
     }
 
@@ -4900,6 +5035,16 @@
 };
 
 const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sLinearVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {0, -96.0f}, {33, -68.0f}, {66, -34.0f}, {100, 0.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sSilentVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {0, -96.0f}, {1, -96.0f}, {2, -96.0f}, {100, -96.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
             *AudioPolicyManager::sVolumeProfiles[AUDIO_STREAM_CNT]
                                                    [AudioPolicyManager::DEVICE_CATEGORY_CNT] = {
     { // AUDIO_STREAM_VOICE_CALL
@@ -4957,10 +5102,11 @@
         sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_TTS
-        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+      // "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
+        sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sLinearVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sSilentVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
 };
 
@@ -6816,7 +6962,11 @@
                                  ARRAY_SIZE(sDeviceNameToEnumTable),
                                  devName);
             if (type != AUDIO_DEVICE_NONE) {
-                add(new DeviceDescriptor(String8(""), type));
+                sp<DeviceDescriptor> dev = new DeviceDescriptor(String8(""), type);
+                if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
+                    dev->mAddress = String8("0");
+                }
+                add(dev);
             } else {
                 sp<DeviceDescriptor> deviceDesc =
                         declaredDevices.getDeviceFromName(String8(devName));
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index 7dbd73f..50d7831 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -187,6 +187,7 @@
             STRATEGY_SONIFICATION_RESPECTFUL,
             STRATEGY_DTMF,
             STRATEGY_ENFORCED_AUDIBLE,
+            STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
             NUM_STRATEGIES
         };
 
@@ -434,6 +435,8 @@
         static const VolumeCurvePoint sHeadsetSystemVolumeCurve[AudioPolicyManager::VOLCNT];
         static const VolumeCurvePoint sDefaultVoiceVolumeCurve[AudioPolicyManager::VOLCNT];
         static const VolumeCurvePoint sSpeakerVoiceVolumeCurve[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sLinearVolumeCurve[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sSilentVolumeCurve[AudioPolicyManager::VOLCNT];
         // default volume curves per stream and device category. See initializeVolumeCurves()
         static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][DEVICE_CATEGORY_CNT];
 
@@ -808,6 +811,18 @@
         sp<AudioPatch> mCallTxPatch;
         sp<AudioPatch> mCallRxPatch;
 
+        // for supporting "beacon" streams, i.e. streams that only play on speaker, and never
+        // when something other than STREAM_TTS (a.k.a. "Transmitted Through Speaker") is playing
+        enum {
+            STARTING_OUTPUT,
+            STARTING_BEACON,
+            STOPPING_OUTPUT,
+            STOPPING_BEACON
+        };
+        uint32_t mBeaconMuteRefCount;   // ref count for stream that would mute beacon
+        uint32_t mBeaconPlayingRefCount;// ref count for the playing beacon streams
+        bool mBeaconMuted;              // has STREAM_TTS been muted
+
 #ifdef AUDIO_POLICY_TEST
         Mutex   mLock;
         Condition mWaitWorkCV;
@@ -852,6 +867,13 @@
                 const audio_offload_info_t *offloadInfo);
         // internal function to derive a stream type value from audio attributes
         audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr);
+        // return true if any output is playing anything besides the stream to ignore
+        bool isAnyOutputActive(audio_stream_type_t streamToIgnore);
+        // event is one of STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON
+        // returns 0 if no mute/unmute event happened, the largest latency of the device where
+        //   the mute/unmute happened
+        uint32_t handleEventForBeacon(int event);
+        uint32_t setBeaconMute(bool mute);
 };
 
 };
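The AudioPolicyManager changes above introduce STRATEGY_TRANSMITTED_THROUGH_SPEAKER and two reference counts: mBeaconMuteRefCount tracks non-beacon playback (any of which mutes AUDIO_STREAM_TTS) and mBeaconPlayingRefCount tracks active beacon streams, so the beacon is audible only while it is playing and nothing else is. Below is a compact standalone model of that bookkeeping; the event names mirror the patch, but the class is hypothetical and returns only the resulting mute decision rather than a mute latency:

    #include <cstdio>

    enum Event { STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON };

    struct BeaconPolicy {
        unsigned muteRefCount = 0;     // active non-beacon streams
        unsigned playingRefCount = 0;  // active beacon (TTS) streams

        // Returns true when the beacon stream should currently be muted.
        bool handleEvent(Event e) {
            switch (e) {
            case STARTING_OUTPUT: muteRefCount++; break;
            case STOPPING_OUTPUT: if (muteRefCount > 0) muteRefCount--; break;
            case STARTING_BEACON: playingRefCount++; break;
            case STOPPING_BEACON: if (playingRefCount > 0) playingRefCount--; break;
            }
            // Any other playback mutes the beacon; otherwise it is audible
            // only while a beacon stream is actually playing.
            return muteRefCount > 0 || playingRefCount == 0;
        }
    };

    int main() {
        BeaconPolicy p;
        printf("%d\n", p.handleEvent(STARTING_BEACON));   // 0: beacon alone, unmuted
        printf("%d\n", p.handleEvent(STARTING_OUTPUT));   // 1: other playback, muted
        printf("%d\n", p.handleEvent(STOPPING_OUTPUT));   // 0: playback stopped, unmuted
        return 0;
    }
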
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 7b90d28..42a5507 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -2954,6 +2954,10 @@
             staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2);
     if (!sensorSize.count) return NO_INIT;
 
+    camera_metadata_ro_entry_t pixelArraySize =
+            staticInfo(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, 2, 2);
+    if (!pixelArraySize.count) return NO_INIT;
+
     float arrayAspect = static_cast<float>(fastInfo.arrayWidth) /
             fastInfo.arrayHeight;
     float stillAspect = static_cast<float>(pictureWidth) / pictureHeight;
@@ -3003,6 +3007,16 @@
         vertCropFactor = (arrayAspect < stillAspect) ?
                 (arrayAspect / stillAspect) : 1.f;
     }
+
+    /**
+     * Convert the crop factors w.r.t the active array size to the crop factors
+     * w.r.t the pixel array size.
+     */
+    horizCropFactor *= (static_cast<float>(fastInfo.arrayWidth) /
+                            pixelArraySize.data.i32[0]);
+    vertCropFactor *= (static_cast<float>(fastInfo.arrayHeight) /
+                            pixelArraySize.data.i32[1]);
+
     ALOGV("Horiz crop factor: %f, vert crop fact: %f",
             horizCropFactor, vertCropFactor);
     /**