Merge "Remove checks for specific sample rates and channel counts"
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 7e9d557..7d23d02 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -746,6 +746,7 @@
 
     bool                    mInUnderrun;            // whether track is currently in underrun state
     String8                 mName;                  // server's name for this IAudioTrack
+    uint32_t                mPausedPosition;        // cached render position while paused (offloaded tracks only)
 
 private:
     class DeathNotifier : public IBinder::DeathRecipient {
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index d25c40b..3217171 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -99,7 +99,8 @@
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT)
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mPausedPosition(0)
 {
 }
 
@@ -121,7 +122,8 @@
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT)
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mPausedPosition(0)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
@@ -147,7 +149,8 @@
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT)
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mPausedPosition(0)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             0 /*frameCount*/, flags, cbf, user, notificationFrames,
@@ -551,6 +554,16 @@
     }
     mProxy->interrupt();
     mAudioTrack->pause();
+
+    if (isOffloaded()) {
+        if (mOutput != 0) {
+            uint32_t halFrames;
+            // OffloadThread sends the HAL pause from its threadLoop, so the
+            // position saved here can be slightly off
+            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
+            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
+        }
+    }
 }
 
 status_t AudioTrack::setVolume(float left, float right)
@@ -770,6 +783,12 @@
     if (isOffloaded_l()) {
         uint32_t dspFrames = 0;
 
+        if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) {
+            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
+            *position = mPausedPosition;
+            return NO_ERROR;
+        }
+
         if (mOutput != 0) {
             uint32_t halFrames;
             AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
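
Taken together, the pause() and getPosition() hunks implement a cache-and-serve scheme for offloaded tracks: pause() snapshots the render position into mPausedPosition, and getPosition() returns that snapshot while the track is paused instead of querying the HAL again. Below is a minimal, self-contained sketch of the same pattern in plain C++; Track, queryRenderPosition() and the locking are invented stand-ins, not the real AudioTrack/AudioSystem interfaces.

```cpp
#include <cstdint>
#include <iostream>
#include <mutex>

// Hypothetical stand-in for AudioSystem::getRenderPosition(); in the real
// patch the position comes from the audio HAL via the output thread.
static uint32_t queryRenderPosition() {
    static uint32_t fakeFrames = 0;
    return fakeFrames += 4800;   // pretend 100 ms of 48 kHz audio elapsed
}

class Track {
public:
    void pause() {
        std::lock_guard<std::mutex> lock(mLock);
        mPaused = true;
        // Snapshot the position now; the HAL pause happens asynchronously on
        // the playback thread, so the value can be slightly off.
        mPausedPosition = queryRenderPosition();
    }

    void resume() {
        std::lock_guard<std::mutex> lock(mLock);
        mPaused = false;
    }

    uint32_t getPosition() {
        std::lock_guard<std::mutex> lock(mLock);
        if (mPaused) {
            // Serve the cached value so the reported position stays stable
            // for the whole duration of the pause.
            return mPausedPosition;
        }
        return queryRenderPosition();
    }

private:
    std::mutex mLock;
    bool mPaused = false;
    uint32_t mPausedPosition = 0;
};

int main() {
    Track t;
    std::cout << "playing: " << t.getPosition() << '\n';
    t.pause();
    std::cout << "paused:  " << t.getPosition() << '\n';  // same value repeated
    std::cout << "paused:  " << t.getPosition() << '\n';
    t.resume();
    std::cout << "playing: " << t.getPosition() << '\n';
}
```

The point of the design is that repeated position queries during a pause return one stable value instead of whatever the asynchronously-pausing HAL happens to report.
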
@@ -1488,6 +1507,7 @@
     }
     size_t misalignment = mProxy->getMisalignment();
     uint32_t sequence = mSequence;
+    sp<AudioTrackClientProxy> proxy = mProxy;
 
     // These fields don't need to be cached, because they are assigned only by set():
     //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
@@ -1496,35 +1516,32 @@
     mLock.unlock();
 
     if (waitStreamEnd) {
-        AutoMutex lock(mLock);
-
-        sp<AudioTrackClientProxy> proxy = mProxy;
-        sp<IMemory> iMem = mCblkMemory;
-
         struct timespec timeout;
         timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
         timeout.tv_nsec = 0;
 
-        mLock.unlock();
-        status_t status = mProxy->waitStreamEndDone(&timeout);
-        mLock.lock();
+        status_t status = proxy->waitStreamEndDone(&timeout);
         switch (status) {
         case NO_ERROR:
         case DEAD_OBJECT:
         case TIMED_OUT:
-            mLock.unlock();
             mCbf(EVENT_STREAM_END, mUserData, NULL);
-            mLock.lock();
-            if (mState == STATE_STOPPING) {
-                mState = STATE_STOPPED;
-                if (status != DEAD_OBJECT) {
-                   return NS_INACTIVE;
+            {
+                AutoMutex lock(mLock);
+                // The previously assigned value of waitStreamEnd is no longer valid,
+                // since the mutex has been unlocked and either the callback handler
+                // or another thread could have re-started the AudioTrack during that time.
+                waitStreamEnd = mState == STATE_STOPPING;
+                if (waitStreamEnd) {
+                    mState = STATE_STOPPED;
                 }
             }
-            return 0;
-        default:
-            return 0;
+            if (waitStreamEnd && status != DEAD_OBJECT) {
+               return NS_INACTIVE;
+            }
+            break;
         }
+        return 0;
     }
 
     // perform callbacks while unlocked
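
The waitStreamEnd rework above is a drop-the-lock-and-revalidate refactor: a strong reference to the proxy is taken while the lock is held, the blocking wait and the EVENT_STREAM_END callback run unlocked, and the state is re-read under the lock afterwards because the callback or another thread may have restarted the track in the meantime. A rough sketch of that pattern in portable C++, with Worker, State and doBlockingWait() as made-up stand-ins for the AudioTrack internals:

```cpp
#include <iostream>
#include <memory>
#include <mutex>

enum class State { Active, Stopping, Stopped };

class Worker {
public:
    void requestStop() {
        std::lock_guard<std::mutex> lock(mLock);
        mState = State::Stopping;
    }

    // Rough equivalent of the reworked waitStreamEnd handling.
    void processStreamEnd() {
        std::shared_ptr<int> proxy;
        bool waitStreamEnd;
        {
            std::lock_guard<std::mutex> lock(mLock);
            proxy = mProxy;                       // keep the proxy alive while unlocked
            waitStreamEnd = (mState == State::Stopping);
        }
        if (!waitStreamEnd) {
            return;
        }

        doBlockingWait(*proxy);                   // blocking call, lock NOT held
        onStreamEnd();                            // user callback, lock NOT held

        {
            std::lock_guard<std::mutex> lock(mLock);
            // Revalidate: the earlier check is stale, since the callback or
            // another thread may have restarted us while the lock was released.
            waitStreamEnd = (mState == State::Stopping);
            if (waitStreamEnd) {
                mState = State::Stopped;
            }
        }
        if (waitStreamEnd) {
            std::cout << "stream end handled, now stopped\n";
        } else {
            std::cout << "restarted while waiting, nothing to do\n";
        }
    }

private:
    void doBlockingWait(int /*proxy*/) {}         // stands in for waitStreamEndDone()
    void onStreamEnd() {}                         // stands in for the EVENT_STREAM_END callback

    std::mutex mLock;
    State mState = State::Active;
    std::shared_ptr<int> mProxy = std::make_shared<int>(0);
};

int main() {
    Worker w;
    w.requestStop();
    w.processStreamEnd();
}
```
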
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 4be3c09..1a027a6 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -476,10 +476,11 @@
         case START_OUTPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
-            uint32_t stream = data.readInt32();
+            audio_stream_type_t stream =
+                                static_cast <audio_stream_type_t>(data.readInt32());
             int session = data.readInt32();
             reply->writeInt32(static_cast <uint32_t>(startOutput(output,
-                                                                 (audio_stream_type_t)stream,
+                                                                 stream,
                                                                  session)));
             return NO_ERROR;
         } break;
@@ -487,10 +488,11 @@
         case STOP_OUTPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
-            uint32_t stream = data.readInt32();
+            audio_stream_type_t stream =
+                                static_cast <audio_stream_type_t>(data.readInt32());
             int session = data.readInt32();
             reply->writeInt32(static_cast <uint32_t>(stopOutput(output,
-                                                                (audio_stream_type_t)stream,
+                                                                stream,
                                                                 session)));
             return NO_ERROR;
         } break;
@@ -633,7 +635,7 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
             uint32_t inPastMs = (uint32_t)data.readInt32();
-            reply->writeInt32( isStreamActive((audio_stream_type_t) stream, inPastMs) );
+            reply->writeInt32( isStreamActive(stream, inPastMs) );
             return NO_ERROR;
         } break;
 
@@ -641,7 +643,7 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
             uint32_t inPastMs = (uint32_t)data.readInt32();
-            reply->writeInt32( isStreamActiveRemotely((audio_stream_type_t) stream, inPastMs) );
+            reply->writeInt32( isStreamActiveRemotely(stream, inPastMs) );
             return NO_ERROR;
         } break;
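
The IAudioPolicyService changes are purely about typing: the stream value is converted to audio_stream_type_t once, at the point where it is read out of the Parcel, so the later call sites no longer need their own casts. A small standalone illustration of that cast-at-the-boundary idea, using an invented FakeParcel reader and StreamType enum in place of the real Binder and audio types:

```cpp
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Stand-in for audio_stream_type_t.
enum class StreamType : int32_t { Default = -1, VoiceCall = 0, Music = 3 };

// Minimal fake of a Parcel-like reader.
class FakeParcel {
public:
    explicit FakeParcel(std::vector<int32_t> words) : mWords(std::move(words)) {}
    int32_t readInt32() { return mWords.at(mPos++); }
private:
    std::vector<int32_t> mWords;
    size_t mPos = 0;
};

static void startOutput(int output, StreamType stream, int session) {
    std::cout << "startOutput(output=" << output
              << ", stream=" << static_cast<int32_t>(stream)
              << ", session=" << session << ")\n";
}

int main() {
    FakeParcel data({/*output*/ 7, /*stream*/ 3, /*session*/ 42});

    int output = data.readInt32();
    // Cast once, where the value enters the process, so every later use is
    // already strongly typed and no per-call-site cast is needed.
    StreamType stream = static_cast<StreamType>(data.readInt32());
    int session = data.readInt32();

    startOutput(output, stream, session);
}
```

Converting at the read boundary keeps the raw int32_t confined to the wire format and lets the compiler check the rest of the call chain.
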
 
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index aeecdbc..a3093d0 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -35,7 +35,6 @@
 #include <gui/SurfaceComposerClient.h>
 
 #include <binder/ProcessState.h>
-#include <ui/FramebufferNativeWindow.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaBufferGroup.h>
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 357ea22..690d0d6 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -5555,12 +5555,12 @@
     mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common);
     mFrameCount = mBufferSize / mFrameSize;
     // This is the formula for calculating the temporary buffer size.
-    // With 3 HAL buffers, we can guarantee ability to down-sample the input by ratio of 2:1 to
+    // With 7 HAL buffers, we can guarantee ability to down-sample the input by ratio of 6:1 to
     // 1 full output buffer, regardless of the alignment of the available input.
-    // The "3" is somewhat arbitrary, and could probably be larger.
+    // The value is somewhat arbitrary, and could probably be even larger.
     // A larger value should allow more old data to be read after a track calls start(),
     // without increasing latency.
-    mRsmpInFrames = mFrameCount * 3;
+    mRsmpInFrames = mFrameCount * 7;
     mRsmpInFramesP2 = roundup(mRsmpInFrames);
     delete[] mRsmpInBuffer;
     // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
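
The Threads.cpp hunk only scales a constant, but the sizing rule in the comment is worth making concrete: N HAL-sized input buffers guarantee an (N-1):1 down-sample into one full output buffer, with the extra buffer absorbing whatever alignment the available input happens to have. The sketch below runs that arithmetic for an invented HAL configuration; roundUpPow2() is a local stand-in for the AOSP roundup() helper used for mRsmpInFramesP2 (the P2 suffix indicates a power-of-two size), whose definition is not part of this diff.

```cpp
#include <cstddef>
#include <iostream>

// Round up to the next power of two (local reimplementation for illustration).
static size_t roundUpPow2(size_t v) {
    size_t p = 1;
    while (p < v) {
        p <<= 1;
    }
    return p;
}

int main() {
    // Example HAL configuration (made up for illustration).
    const size_t bufferSizeBytes = 3840;                             // one HAL buffer
    const size_t frameSizeBytes  = 4;                                // 16-bit stereo
    const size_t frameCount      = bufferSizeBytes / frameSizeBytes; // 960 frames

    // 7 HAL buffers of input guarantee that a 6:1 down-sample can still
    // produce one full output buffer, whatever the input alignment is.
    const size_t halBuffers   = 7;
    const size_t rsmpInFrames = frameCount * halBuffers;
    const size_t rsmpInP2     = roundUpPow2(rsmpInFrames);           // power-of-two ring size

    std::cout << "frameCount      = " << frameCount   << "\n"
              << "mRsmpInFrames   = " << rsmpInFrames << "\n"
              << "mRsmpInFramesP2 = " << rsmpInP2     << "\n"
              << "max downsample  = " << (halBuffers - 1) << ":1\n";
}
```

With these example numbers the bump from 3 to 7 HAL buffers grows the staging buffer from 2880 to 6720 frames (rounded up to 8192), trading a modest amount of memory for more pre-start history and deeper down-sampling headroom without adding latency.
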