Merge "Pass resolution to Crypto plugin on format change" into lmp-mr1-dev
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index 0dda6b6..dce313a 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -206,7 +206,7 @@
         return res;
     }
 
-    size_t sectionCount;
+    size_t sectionCount = 0;
     if (tagCount > 0) {
         if ((res = parcel->readInt32(reinterpret_cast<int32_t*>(&sectionCount))) != OK) {
             ALOGE("%s: could not read section count for.", __FUNCTION__);
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 4932d40..583695d 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -133,10 +133,11 @@
      *
      * Returned value
      *   *descriptor updated with descriptors of pre processings enabled by default
-     *   *count      number of descriptors returned if returned status is N_ERROR.
+     *   *count      number of descriptors returned if returned status is NO_ERROR.
      *               total number of pre processing enabled by default if returned status is
      *               NO_MEMORY. This happens if the count passed as input is less than the number
-     *               of descriptors to return
+     *               of descriptors to return.
+     *               *count is limited to kMaxPreProcessing on return.
      */
     static status_t queryDefaultPreProcessing(int audioSession,
                                               effect_descriptor_t *descriptors,
@@ -391,6 +392,10 @@
       */
      static status_t guidToString(const effect_uuid_t *guid, char *str, size_t maxLen);
 
+     // kMaxPreProcessing is a reasonable value for the maximum number of preprocessing effects
+     // that can be applied simultaneously.
+     static const uint32_t kMaxPreProcessing = 10;
+
 protected:
      bool                    mEnabled;           // enable state
      int32_t                 mSessionId;         // audio session ID
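
For context, a minimal caller-side sketch of the capped query contract documented above; the session id value and the error handling are illustrative only and not part of this change:

    int sessionId = 0;  // placeholder session for illustration
    effect_descriptor_t descs[AudioEffect::kMaxPreProcessing];
    uint32_t count = AudioEffect::kMaxPreProcessing;
    status_t status = AudioEffect::queryDefaultPreProcessing(sessionId, descs, &count);
    if (status == NO_ERROR) {
        // 'count' descriptors were written, at most kMaxPreProcessing
    } else if (status == NO_MEMORY) {
        // more effects are enabled by default than were returned; 'count' reports
        // the total, still capped at kMaxPreProcessing per the updated contract
    }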
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 0a89fbb..082a5e1 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -278,7 +278,9 @@
     }
 
     // handle default values first.
-    if (streamType == AUDIO_STREAM_DEFAULT) {
+    // TODO once AudioPolicyManager fully supports audio_attributes_t,
+    //   remove stream "text-to-speech" redirect
+    if ((streamType == AUDIO_STREAM_DEFAULT) || (streamType == AUDIO_STREAM_TTS)) {
         streamType = AUDIO_STREAM_MUSIC;
     }
 
@@ -2124,17 +2126,30 @@
         mStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
         return;
     }
+    // TODO once AudioPolicyManager fully supports audio_attributes_t,
+    //   remove stream remap, the flag will be enough
+    if ((aa.flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
+        mStreamType = AUDIO_STREAM_TTS;
+        return;
+    }
 
     // usage to stream type mapping
     switch (aa.usage) {
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY: {
         // TODO once AudioPolicyManager fully supports audio_attributes_t,
-        //   remove stream change based on phone state
-        if (AudioSystem::getPhoneState() == AUDIO_MODE_RINGTONE) {
+        //   remove stream change based on stream activity
+        bool active;
+        status_t status = AudioSystem::isStreamActive(AUDIO_STREAM_RING, &active, 0);
+        if (status == NO_ERROR && active == true) {
             mStreamType = AUDIO_STREAM_RING;
             break;
         }
-        /// FALL THROUGH
+        status = AudioSystem::isStreamActive(AUDIO_STREAM_ALARM, &active, 0);
+        if (status == NO_ERROR && active == true) {
+            mStreamType = AUDIO_STREAM_ALARM;
+            break;
+        }
+    }    /// FALL THROUGH
     case AUDIO_USAGE_MEDIA:
     case AUDIO_USAGE_GAME:
     case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
@@ -2174,7 +2189,7 @@
 
 bool AudioTrack::isValidAttributes(const audio_attributes_t *paa) {
     // has flags that map to a strategy?
-    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO)) != 0) {
+    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO | AUDIO_FLAG_BEACON)) != 0) {
         return true;
     }
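
A small illustrative fragment of how a client would opt into the new beacon path; the usage and content type chosen here are examples, not values taken from this change:

    audio_attributes_t attr;
    memset(&attr, 0, sizeof(attr));
    attr.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
    attr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
    attr.flags = AUDIO_FLAG_BEACON;
    // With AUDIO_FLAG_BEACON set, the attribute-to-stream mapping above selects
    // AUDIO_STREAM_TTS, and isValidAttributes() now accepts the flag as mapping
    // to a strategy.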
 
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index eec025e..561cb24 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -348,7 +348,13 @@
 
 void AudioTrackClientProxy::flush()
 {
-    mCblk->u.mStreaming.mFlush++;
+    // This works for mFrameCountP2 <= 2^30
+    size_t increment = mFrameCountP2 << 1;
+    size_t mask = increment - 1;
+    audio_track_cblk_t* cblk = mCblk;
+    int32_t newFlush = (cblk->u.mStreaming.mRear & mask) |
+                        ((cblk->u.mStreaming.mFlush & ~mask) + increment);
+    android_atomic_release_store(newFlush, &cblk->u.mStreaming.mFlush);
 }
 
 bool AudioTrackClientProxy::clearStreamEndDone() {
@@ -536,17 +542,27 @@
         rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
         front = cblk->u.mStreaming.mFront;
         if (flush != mFlush) {
-            mFlush = flush;
             // effectively obtain then release whatever is in the buffer
-            android_atomic_release_store(rear, &cblk->u.mStreaming.mFront);
-            if (front != rear) {
+            size_t mask = (mFrameCountP2 << 1) - 1;
+            int32_t newFront = (front & ~mask) | (flush & mask);
+            ssize_t filled = rear - newFront;
+            // Rather than shutting down on a corrupt flush, just treat it as a full flush
+            if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+                ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, filled %d=%#x",
+                        mFlush, flush, front, rear, mask, newFront, filled, filled);
+                newFront = rear;
+            }
+            mFlush = flush;
+            android_atomic_release_store(newFront, &cblk->u.mStreaming.mFront);
+            // There is no danger from a false positive, so err on the side of caution
+            if (true /*front != newFront*/) {
                 int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
                 if (!(old & CBLK_FUTEX_WAKE)) {
                     (void) syscall(__NR_futex, &cblk->mFutex,
                             mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
                 }
             }
-            front = rear;
+            front = newFront;
         }
     } else {
         front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
@@ -668,6 +684,7 @@
 
     int32_t flush = cblk->u.mStreaming.mFlush;
     if (flush != mFlush) {
+        // FIXME should return an accurate value, but over-estimate is better than under-estimate
         return mFrameCount;
     }
     // the acquire might not be necessary since not doing a subsequent read
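
To make the flush protocol above easier to follow, here is a stand-alone restatement of the packing scheme (a sketch mirroring the code, not part of the commit): the low bits of mFlush carry the rear index at flush time, and the high bits carry a sequence counter so that two flushes issued at the same rear position still compare unequal on the server side.

    // client: publish a new flush word
    int32_t packFlush(int32_t rear, int32_t prevFlush, size_t frameCountP2) {
        size_t increment = frameCountP2 << 1;   // valid for frameCountP2 <= 2^30
        size_t mask = increment - 1;
        return (rear & mask) | ((prevFlush & ~mask) + increment);
    }
    // server: rebuild the new front index from the current front and the flush word
    int32_t frontAfterFlush(int32_t front, int32_t flush, size_t frameCountP2) {
        size_t mask = (frameCountP2 << 1) - 1;
        return (front & ~mask) | (flush & mask);
    }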
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 256cb3f..180f5fb 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -23,6 +23,7 @@
 
 #include <binder/Parcel.h>
 
+#include <media/AudioEffect.h>
 #include <media/IAudioPolicyService.h>
 
 #include <system/audio.h>
@@ -916,16 +917,18 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             int audioSession = data.readInt32();
             uint32_t count = data.readInt32();
+            if (count > AudioEffect::kMaxPreProcessing) {
+                count = AudioEffect::kMaxPreProcessing;
+            }
             uint32_t retCount = count;
-            effect_descriptor_t *descriptors =
-                    (effect_descriptor_t *)new char[count * sizeof(effect_descriptor_t)];
+            effect_descriptor_t *descriptors = new effect_descriptor_t[count];
             status_t status = queryDefaultPreProcessing(audioSession, descriptors, &retCount);
             reply->writeInt32(status);
             if (status != NO_ERROR && status != NO_MEMORY) {
                 retCount = 0;
             }
             reply->writeInt32(retCount);
-            if (retCount) {
+            if (retCount != 0) {
                 if (retCount < count) {
                     count = retCount;
                 }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 4a9d790..931451f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -306,10 +306,6 @@
     (new AMessage(kWhatPause, id()))->post();
 }
 
-void NuPlayer::resume() {
-    (new AMessage(kWhatResume, id()))->post();
-}
-
 void NuPlayer::resetAsync() {
     if (mSource != NULL) {
         // During a reset, the data source might be unresponsive already, we need to
@@ -574,69 +570,11 @@
         case kWhatStart:
         {
             ALOGV("kWhatStart");
-
-            mVideoIsAVC = false;
-            mOffloadAudio = false;
-            mAudioEOS = false;
-            mVideoEOS = false;
-            mSkipRenderingAudioUntilMediaTimeUs = -1;
-            mSkipRenderingVideoUntilMediaTimeUs = -1;
-            mNumFramesTotal = 0;
-            mNumFramesDropped = 0;
-            mStarted = true;
-
-            /* instantiate decoders now for secure playback */
-            if (mSourceFlags & Source::FLAG_SECURE) {
-                if (mNativeWindow != NULL) {
-                    instantiateDecoder(false, &mVideoDecoder);
-                }
-
-                if (mAudioSink != NULL) {
-                    instantiateDecoder(true, &mAudioDecoder);
-                }
+            if (mStarted) {
+                onResume();
+            } else {
+                onStart();
             }
-
-            mSource->start();
-
-            uint32_t flags = 0;
-
-            if (mSource->isRealTime()) {
-                flags |= Renderer::FLAG_REAL_TIME;
-            }
-
-            sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
-            audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
-            if (mAudioSink != NULL) {
-                streamType = mAudioSink->getAudioStreamType();
-            }
-
-            sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
-
-            mOffloadAudio =
-                canOffloadStream(audioMeta, (videoFormat != NULL),
-                                 true /* is_streaming */, streamType);
-            if (mOffloadAudio) {
-                flags |= Renderer::FLAG_OFFLOAD_AUDIO;
-            }
-
-            sp<AMessage> notify = new AMessage(kWhatRendererNotify, id());
-            ++mRendererGeneration;
-            notify->setInt32("generation", mRendererGeneration);
-            mRenderer = new Renderer(mAudioSink, notify, flags);
-
-            mRendererLooper = new ALooper;
-            mRendererLooper->setName("NuPlayerRenderer");
-            mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
-            mRendererLooper->registerHandler(mRenderer);
-
-            sp<MetaData> meta = getFileMeta();
-            int32_t rate;
-            if (meta != NULL
-                    && meta->findInt32(kKeyFrameRate, &rate) && rate > 0) {
-                mRenderer->setVideoFrameRate(rate);
-            }
-
-            postScanSources();
             break;
         }
 
@@ -979,26 +917,6 @@
             break;
         }
 
-        case kWhatResume:
-        {
-            if (mSource != NULL) {
-                mSource->resume();
-            } else {
-                ALOGW("resume called when source is gone or not set");
-            }
-            // |mAudioDecoder| may have been released due to the pause timeout, so re-create it if
-            // needed.
-            if (audioDecoderStillNeeded() && mAudioDecoder == NULL) {
-                instantiateDecoder(true /* audio */, &mAudioDecoder);
-            }
-            if (mRenderer != NULL) {
-                mRenderer->resume();
-            } else {
-                ALOGW("resume called when renderer is gone or not set");
-            }
-            break;
-        }
-
         case kWhatSourceNotify:
         {
             onSourceNotify(msg);
@@ -1017,6 +935,89 @@
     }
 }
 
+void NuPlayer::onResume() {
+    if (mSource != NULL) {
+        mSource->resume();
+    } else {
+        ALOGW("resume called when source is gone or not set");
+    }
+    // |mAudioDecoder| may have been released due to the pause timeout, so re-create it if
+    // needed.
+    if (audioDecoderStillNeeded() && mAudioDecoder == NULL) {
+        instantiateDecoder(true /* audio */, &mAudioDecoder);
+    }
+    if (mRenderer != NULL) {
+        mRenderer->resume();
+    } else {
+        ALOGW("resume called when renderer is gone or not set");
+    }
+}
+
+void NuPlayer::onStart() {
+    mVideoIsAVC = false;
+    mOffloadAudio = false;
+    mAudioEOS = false;
+    mVideoEOS = false;
+    mSkipRenderingAudioUntilMediaTimeUs = -1;
+    mSkipRenderingVideoUntilMediaTimeUs = -1;
+    mNumFramesTotal = 0;
+    mNumFramesDropped = 0;
+    mStarted = true;
+
+    /* instantiate decoders now for secure playback */
+    if (mSourceFlags & Source::FLAG_SECURE) {
+        if (mNativeWindow != NULL) {
+            instantiateDecoder(false, &mVideoDecoder);
+        }
+
+        if (mAudioSink != NULL) {
+            instantiateDecoder(true, &mAudioDecoder);
+        }
+    }
+
+    mSource->start();
+
+    uint32_t flags = 0;
+
+    if (mSource->isRealTime()) {
+        flags |= Renderer::FLAG_REAL_TIME;
+    }
+
+    sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+    audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
+    if (mAudioSink != NULL) {
+        streamType = mAudioSink->getAudioStreamType();
+    }
+
+    sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
+
+    mOffloadAudio =
+        canOffloadStream(audioMeta, (videoFormat != NULL),
+                         true /* is_streaming */, streamType);
+    if (mOffloadAudio) {
+        flags |= Renderer::FLAG_OFFLOAD_AUDIO;
+    }
+
+    sp<AMessage> notify = new AMessage(kWhatRendererNotify, id());
+    ++mRendererGeneration;
+    notify->setInt32("generation", mRendererGeneration);
+    mRenderer = new Renderer(mAudioSink, notify, flags);
+
+    mRendererLooper = new ALooper;
+    mRendererLooper->setName("NuPlayerRenderer");
+    mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+    mRendererLooper->registerHandler(mRenderer);
+
+    sp<MetaData> meta = getFileMeta();
+    int32_t rate;
+    if (meta != NULL
+            && meta->findInt32(kKeyFrameRate, &rate) && rate > 0) {
+        mRenderer->setVideoFrameRate(rate);
+    }
+
+    postScanSources();
+}
+
 bool NuPlayer::audioDecoderStillNeeded() {
     // Audio decoder is no longer needed if it's in shut/shutting down status.
     return ((mFlushingAudio != SHUT_DOWN) && (mFlushingAudio != SHUTTING_DOWN_DECODER));
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index d6120d2..14056ca 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -54,7 +54,6 @@
     void start();
 
     void pause();
-    void resume();
 
     // Will notify the driver through "notifyResetComplete" once finished.
     void resetAsync();
@@ -213,6 +212,9 @@
     void handleFlushComplete(bool audio, bool isDecoder);
     void finishFlushIfPossible();
 
+    void onStart();
+    void onResume();
+
     bool audioDecoderStillNeeded();
 
     void flushDecoder(
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index ab46074..b9a1a6c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -239,16 +239,24 @@
             // fall through
         }
 
+        case STATE_PAUSED:
+        case STATE_STOPPED_AND_PREPARED:
+        {
+            if (mAtEOS && mStartupSeekTimeUs < 0) {
+                mStartupSeekTimeUs = 0;
+                mPositionUs = -1;
+            }
+
+            // fall through
+        }
+
         case STATE_PREPARED:
         {
             mAtEOS = false;
             mPlayer->start();
 
             if (mStartupSeekTimeUs >= 0) {
-                if (mStartupSeekTimeUs > 0) {
-                    mPlayer->seekToAsync(mStartupSeekTimeUs);
-                }
-
+                mPlayer->seekToAsync(mStartupSeekTimeUs);
                 mStartupSeekTimeUs = -1;
             }
             break;
@@ -264,20 +272,6 @@
             break;
         }
 
-        case STATE_PAUSED:
-        case STATE_STOPPED_AND_PREPARED:
-        {
-            if (mAtEOS) {
-                mPlayer->seekToAsync(0);
-                mAtEOS = false;
-                mPlayer->resume();
-                mPositionUs = -1;
-            } else {
-                mPlayer->resume();
-            }
-            break;
-        }
-
         default:
             return INVALID_OPERATION;
     }
@@ -348,6 +342,7 @@
 
     switch (mState) {
         case STATE_PREPARED:
+        case STATE_STOPPED_AND_PREPARED:
         {
             mStartupSeekTimeUs = seekTimeUs;
             // pretend that the seek completed. It will actually happen when starting playback.
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index e0c5e18..5d9001c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -38,7 +38,7 @@
 
 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
 // is closed to allow the audio DSP to power down.
-static const int64_t kOffloadPauseMaxUs = 60000000ll;
+static const int64_t kOffloadPauseMaxUs = 10000000ll;
 
 // static
 const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index ab8ac79..6a56729 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -75,7 +75,7 @@
 
 // maximum time in paused state when offloading audio decompression. When elapsed, the AudioPlayer
 // is destroyed to allow the audio DSP to power down.
-static int64_t kOffloadPauseMaxUs = 60000000ll;
+static int64_t kOffloadPauseMaxUs = 10000000ll;
 
 
 struct AwesomeEvent : public TimedEventQueue::Event {
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 7b65de7..ea9d7d3 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -2633,7 +2633,8 @@
     // Check whether the destination thread has a channel count of FCC_2, which is
     // currently required for (most) effects. Prevent moving the effect chain here rather
     // than disabling the addEffect_l() call in dstThread below.
-    if (dstThread->mChannelCount != FCC_2) {
+    if ((dstThread->type() == ThreadBase::MIXER || dstThread->type() == ThreadBase::DUPLICATING) &&
+            dstThread->mChannelCount != FCC_2) {
         ALOGW("moveEffectChain_l() effect chain failed because"
                 " destination thread %p channel count(%u) != %u",
                 dstThread, dstThread->mChannelCount, FCC_2);
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
index dceda97..c06ca72 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -129,8 +129,11 @@
                                     audio_output_flags_t flags,
                                     const audio_offload_info_t *offloadInfo)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
     if (mAudioPolicyManager == NULL) {
-        return 0;
+        return AUDIO_IO_HANDLE_NONE;
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
@@ -158,6 +161,9 @@
                                          audio_stream_type_t stream,
                                          int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
@@ -182,6 +188,9 @@
                                         audio_stream_type_t stream,
                                         int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
@@ -368,6 +377,9 @@
 
 uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return 0;
+    }
     if (mAudioPolicyManager == NULL) {
         return 0;
     }
@@ -378,8 +390,11 @@
 
 audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_DEVICE_NONE;
+    }
     if (mAudioPolicyManager == NULL) {
-        return (audio_devices_t)0;
+        return AUDIO_DEVICE_NONE;
     }
     return mAudioPolicyManager->getDevicesForStream(stream);
 }
@@ -424,8 +439,11 @@
 
 bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mAudioPolicyManager == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mAudioPolicyManager->isStreamActive(stream, inPastMs);
@@ -433,8 +451,11 @@
 
 bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mAudioPolicyManager == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mAudioPolicyManager->isStreamActiveRemotely(stream, inPastMs);
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
index ac41968..09476c1 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
@@ -134,8 +134,11 @@
                                     audio_output_flags_t flags,
                                     const audio_offload_info_t *offloadInfo)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
     if (mpAudioPolicy == NULL) {
-        return 0;
+        return AUDIO_IO_HANDLE_NONE;
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
@@ -147,6 +150,9 @@
                                          audio_stream_type_t stream,
                                          int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
@@ -172,6 +178,9 @@
                                         audio_stream_type_t stream,
                                         int session)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
@@ -370,6 +379,9 @@
 
 uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return 0;
+    }
     if (mpAudioPolicy == NULL) {
         return 0;
     }
@@ -380,8 +392,11 @@
 
 audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return AUDIO_DEVICE_NONE;
+    }
     if (mpAudioPolicy == NULL) {
-        return (audio_devices_t)0;
+        return AUDIO_DEVICE_NONE;
     }
     return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
 }
@@ -426,8 +441,11 @@
 
 bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mpAudioPolicy == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
@@ -435,8 +453,11 @@
 
 bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
 {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return false;
+    }
     if (mpAudioPolicy == NULL) {
-        return 0;
+        return false;
     }
     Mutex::Autolock _l(mLock);
     return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs);
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 5567800..584e170 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -1127,6 +1127,20 @@
         return BAD_VALUE;
     }
 
+    // cannot start playback of STREAM_TTS if any other output is being used
+    uint32_t beaconMuteLatency = 0;
+    if (stream == AUDIO_STREAM_TTS) {
+        ALOGV("\t found BEACON stream");
+        if (isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
+            return INVALID_OPERATION;
+        } else {
+            beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
+        }
+    } else {
+        // some playback other than beacon starts
+        beaconMuteLatency = handleEventForBeacon(STARTING_OUTPUT);
+    }
+
     sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
 
     // increment usage count for this stream on the requested output:
@@ -1138,8 +1152,9 @@
         audio_devices_t newDevice = getNewOutputDevice(output, false /*fromCache*/);
         routing_strategy strategy = getStrategy(stream);
         bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
-                            (strategy == STRATEGY_SONIFICATION_RESPECTFUL);
-        uint32_t waitMs = 0;
+                            (strategy == STRATEGY_SONIFICATION_RESPECTFUL) ||
+                            (beaconMuteLatency > 0);
+        uint32_t waitMs = beaconMuteLatency;
         bool force = false;
         for (size_t i = 0; i < mOutputs.size(); i++) {
             sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
@@ -1153,7 +1168,8 @@
                     force = true;
                 }
                 // wait for audio on other active outputs to be presented when starting
-                // a notification so that audio focus effect can propagate.
+                // a notification so that audio focus effect can propagate, or that a mute/unmute
+                // event occurred for beacon
                 uint32_t latency = desc->latency();
                 if (shouldWait && desc->isActive(latency * 2) && (waitMs < latency)) {
                     waitMs = latency;
@@ -1197,6 +1213,9 @@
 
     sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
 
+    // always handle stream stop, check which stream type is stopping
+    handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
+
     // handle special case for sonification while in call
     if (isInCall()) {
         handleIncallSonification(stream, false, false);
@@ -2669,7 +2688,10 @@
     mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
     mA2dpSuspended(false),
     mSpeakerDrcEnabled(false), mNextUniqueId(1),
-    mAudioPortGeneration(1)
+    mAudioPortGeneration(1),
+    mBeaconMuteRefCount(0),
+    mBeaconPlayingRefCount(0),
+    mBeaconMuted(false)
 {
     mUidCached = getuid();
     mpClientInterface = clientInterface;
@@ -3840,6 +3862,8 @@
     //      use device for strategy media
     // 7: the strategy DTMF is active on the output:
     //      use device for strategy DTMF
+    // 8: the strategy for beacon, a.k.a. "transmitted through speaker" is active on the output:
+    //      use device for strategy t-t-s
     if (outputDesc->isStrategyActive(STRATEGY_ENFORCED_AUDIBLE) &&
         mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
         device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
@@ -3856,6 +3880,8 @@
         device = getDeviceForStrategy(STRATEGY_MEDIA, fromCache);
     } else if (outputDesc->isStrategyActive(STRATEGY_DTMF)) {
         device = getDeviceForStrategy(STRATEGY_DTMF, fromCache);
+    } else if (outputDesc->isStrategyActive(STRATEGY_TRANSMITTED_THROUGH_SPEAKER)) {
+        device = getDeviceForStrategy(STRATEGY_TRANSMITTED_THROUGH_SPEAKER, fromCache);
     }
 
     ALOGV("getNewOutputDevice() selected device %x", device);
@@ -3934,16 +3960,20 @@
     case AUDIO_STREAM_SYSTEM:
         // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
         // while key clicks are played produces a poor result
-    case AUDIO_STREAM_TTS:
     case AUDIO_STREAM_MUSIC:
         return STRATEGY_MEDIA;
     case AUDIO_STREAM_ENFORCED_AUDIBLE:
         return STRATEGY_ENFORCED_AUDIBLE;
+    case AUDIO_STREAM_TTS:
+        return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
     }
 }
 
 uint32_t AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) {
     // flags to strategy mapping
+    if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
+        return (uint32_t) STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
+    }
     if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
         return (uint32_t) STRATEGY_ENFORCED_AUDIBLE;
     }
@@ -3991,6 +4021,74 @@
     }
 }
 
+bool AudioPolicyManager::isAnyOutputActive(audio_stream_type_t streamToIgnore) {
+    for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
+        if (s == (size_t) streamToIgnore) {
+            continue;
+        }
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            const sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
+            if (outputDesc->mRefCount[s] != 0) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+uint32_t AudioPolicyManager::handleEventForBeacon(int event) {
+    switch(event) {
+    case STARTING_OUTPUT:
+        mBeaconMuteRefCount++;
+        break;
+    case STOPPING_OUTPUT:
+        if (mBeaconMuteRefCount > 0) {
+            mBeaconMuteRefCount--;
+        }
+        break;
+    case STARTING_BEACON:
+        mBeaconPlayingRefCount++;
+        break;
+    case STOPPING_BEACON:
+        if (mBeaconPlayingRefCount > 0) {
+            mBeaconPlayingRefCount--;
+        }
+        break;
+    }
+
+    if (mBeaconMuteRefCount > 0) {
+        // any playback causes beacon to be muted
+        return setBeaconMute(true);
+    } else {
+        // no other playback: unmute when beacon starts playing, mute when it stops
+        return setBeaconMute(mBeaconPlayingRefCount == 0);
+    }
+}
+
+uint32_t AudioPolicyManager::setBeaconMute(bool mute) {
+    ALOGV("setBeaconMute(%d) mBeaconMuteRefCount=%d mBeaconPlayingRefCount=%d",
+            mute, mBeaconMuteRefCount, mBeaconPlayingRefCount);
+    // keep track of muted state to avoid repeating mute/unmute operations
+    if (mBeaconMuted != mute) {
+        // mute/unmute AUDIO_STREAM_TTS on all outputs
+        ALOGV("\t muting %d", mute);
+        uint32_t maxLatency = 0;
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            setStreamMute(AUDIO_STREAM_TTS, mute/*on*/,
+                    desc->mIoHandle,
+                    0 /*delay*/, AUDIO_DEVICE_NONE);
+            const uint32_t latency = desc->latency() * 2;
+            if (latency > maxLatency) {
+                maxLatency = latency;
+            }
+        }
+        mBeaconMuted = mute;
+        return maxLatency;
+    }
+    return 0;
+}
+
 audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
                                                              bool fromCache)
 {
@@ -4004,6 +4102,14 @@
     audio_devices_t availableOutputDeviceTypes = mAvailableOutputDevices.types();
     switch (strategy) {
 
+    case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
+        device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
+        if (!device) {
+            ALOGE("getDeviceForStrategy() no device found for "\
+                    "STRATEGY_TRANSMITTED_THROUGH_SPEAKER");
+        }
+        break;
+
     case STRATEGY_SONIFICATION_RESPECTFUL:
         if (isInCall()) {
             device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
@@ -4929,6 +5035,16 @@
 };
 
 const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sLinearVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {0, -96.0f}, {33, -68.0f}, {66, -34.0f}, {100, 0.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sSilentVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {0, -96.0f}, {1, -96.0f}, {2, -96.0f}, {100, -96.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
             *AudioPolicyManager::sVolumeProfiles[AUDIO_STREAM_CNT]
                                                    [AudioPolicyManager::DEVICE_CATEGORY_CNT] = {
     { // AUDIO_STREAM_VOICE_CALL
@@ -4986,10 +5102,11 @@
         sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
     { // AUDIO_STREAM_TTS
-        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+      // "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
+        sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sLinearVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        sSilentVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
     },
 };
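
For readability, the mute policy implemented by handleEventForBeacon()/setBeaconMute() above reduces to the following stand-alone sketch (illustration only): a beacon (AUDIO_STREAM_TTS) track is audible only while it is the sole thing playing.

    struct BeaconPolicy {                 // illustrative, not AOSP code
        uint32_t muteRefCount;            // non-beacon outputs currently started
        uint32_t playingRefCount;         // beacon streams currently started
        BeaconPolicy() : muteRefCount(0), playingRefCount(0) {}
        bool beaconMuted() const {
            // any other playback mutes the beacon; with no other playback the
            // beacon is unmuted only while at least one beacon stream is started
            return muteRefCount > 0 || playingRefCount == 0;
        }
    };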
 
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index 7dbd73f..50d7831 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -187,6 +187,7 @@
             STRATEGY_SONIFICATION_RESPECTFUL,
             STRATEGY_DTMF,
             STRATEGY_ENFORCED_AUDIBLE,
+            STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
             NUM_STRATEGIES
         };
 
@@ -434,6 +435,8 @@
         static const VolumeCurvePoint sHeadsetSystemVolumeCurve[AudioPolicyManager::VOLCNT];
         static const VolumeCurvePoint sDefaultVoiceVolumeCurve[AudioPolicyManager::VOLCNT];
         static const VolumeCurvePoint sSpeakerVoiceVolumeCurve[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sLinearVolumeCurve[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sSilentVolumeCurve[AudioPolicyManager::VOLCNT];
         // default volume curves per stream and device category. See initializeVolumeCurves()
         static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][DEVICE_CATEGORY_CNT];
 
@@ -808,6 +811,18 @@
         sp<AudioPatch> mCallTxPatch;
         sp<AudioPatch> mCallRxPatch;
 
+        // for supporting "beacon" streams, i.e. streams that only play on speaker, and never
+        // when something other than STREAM_TTS (a.k.a. "Transmitted Through Speaker") is playing
+        enum {
+            STARTING_OUTPUT,
+            STARTING_BEACON,
+            STOPPING_OUTPUT,
+            STOPPING_BEACON
+        };
+        uint32_t mBeaconMuteRefCount;   // ref count for stream that would mute beacon
+        uint32_t mBeaconPlayingRefCount;// ref count for the playing beacon streams
+        bool mBeaconMuted;              // has STREAM_TTS been muted
+
 #ifdef AUDIO_POLICY_TEST
         Mutex   mLock;
         Condition mWaitWorkCV;
@@ -852,6 +867,13 @@
                 const audio_offload_info_t *offloadInfo);
         // internal function to derive a stream type value from audio attributes
         audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr);
+        // return true if any output is playing anything besides the stream to ignore
+        bool isAnyOutputActive(audio_stream_type_t streamToIgnore);
+        // event is one of STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON
+        // returns 0 if no mute/unmute event happened, the largest latency of the device where
+        //   the mute/unmute happened
+        uint32_t handleEventForBeacon(int event);
+        uint32_t setBeaconMute(bool mute);
 };
 
 };
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 7b90d28..42a5507 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -2954,6 +2954,10 @@
             staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2);
     if (!sensorSize.count) return NO_INIT;
 
+    camera_metadata_ro_entry_t pixelArraySize =
+            staticInfo(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, 2, 2);
+    if (!pixelArraySize.count) return NO_INIT;
+
     float arrayAspect = static_cast<float>(fastInfo.arrayWidth) /
             fastInfo.arrayHeight;
     float stillAspect = static_cast<float>(pictureWidth) / pictureHeight;
@@ -3003,6 +3007,16 @@
         vertCropFactor = (arrayAspect < stillAspect) ?
                 (arrayAspect / stillAspect) : 1.f;
     }
+
+    /**
+     * Convert the crop factors w.r.t the active array size to the crop factors
+     * w.r.t the pixel array size.
+     */
+    horizCropFactor *= (static_cast<float>(fastInfo.arrayWidth) /
+                            pixelArraySize.data.i32[0]);
+    vertCropFactor *= (static_cast<float>(fastInfo.arrayHeight) /
+                            pixelArraySize.data.i32[1]);
+
     ALOGV("Horiz crop factor: %f, vert crop fact: %f",
             horizCropFactor, vertCropFactor);
     /**
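
A worked example of the conversion added above, using made-up sensor geometry rather than values from this change: with a 4000x3000 active array inside a 4200x3120 pixel array and crop factors of 1.0 relative to the active array, the reported factors become

    horizCropFactor *= 4000.0f / 4200.0f;   // ~0.952
    vertCropFactor  *= 3000.0f / 3120.0f;   // ~0.962

so consumers that interpret the crop factors relative to the full pixel array see the slightly smaller effective region.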