Merge "Add a method to associate MediaDrm session with MediaCrypto"
diff --git a/include/media/AudioResamplerPublic.h b/include/media/AudioResamplerPublic.h
index 0634741..07d946d 100644
--- a/include/media/AudioResamplerPublic.h
+++ b/include/media/AudioResamplerPublic.h
@@ -34,6 +34,16 @@
 // an int32_t of the phase increments, making the resulting sample rate inexact.
 #define AUDIO_RESAMPLER_UP_RATIO_MAX 65536
 
+#define AUDIO_TIMESTRETCH_SPEED_MIN    0.5f
+#define AUDIO_TIMESTRETCH_SPEED_MAX    2.0f
+#define AUDIO_TIMESTRETCH_SPEED_NORMAL 1.0f
+
+#define AUDIO_TIMESTRETCH_PITCH_MIN    0.5f
+#define AUDIO_TIMESTRETCH_PITCH_MAX    2.0f
+#define AUDIO_TIMESTRETCH_PITCH_NORMAL 1.0f
+
+// TODO: Consider putting these inlines into a class scope
+
 // Returns the source frames needed to resample to destination frames.  This is not a precise
 // value and depends on the resampler (and possibly how it handles rounding internally).
 // Nevertheless, this should be an upper bound on the requirements of the resampler.
@@ -58,4 +68,13 @@
     return dstFrames > 2 ? dstFrames - 2 : 0;
 }
 
+static inline size_t sourceFramesNeededWithTimestretch(
+        uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate,
+        float speed) {
+    // required is the number of input frames the resampler needs
+    size_t required = sourceFramesNeeded(srcSampleRate, dstFramesRequired, dstSampleRate);
+    // to deliver this, the time stretcher requires:
+    return required * (double)speed + 1 + 1; // accounting for rounding dependencies
+}
+
 #endif // ANDROID_AUDIO_RESAMPLER_PUBLIC_H
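The new sourceFramesNeededWithTimestretch() simply scales the resampler's input requirement by the playback speed and adds a small rounding allowance. Below is a standalone sketch of the arithmetic; sourceFramesNeeded() is reproduced in simplified form and the sample rates and frame counts in main() are hypothetical.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified form of the existing helper: source frames needed to produce
// dstFramesRequired output frames, with a +1+1 rounding/interpolation margin.
static inline size_t sourceFramesNeeded(
        uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
    return srcSampleRate == dstSampleRate ? dstFramesRequired :
            size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
}

// The new helper: the time stretcher consumes speed times as many frames.
static inline size_t sourceFramesNeededWithTimestretch(
        uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate,
        float speed) {
    size_t required = sourceFramesNeeded(srcSampleRate, dstFramesRequired, dstSampleRate);
    return required * (double)speed + 1 + 1; // accounting for rounding dependencies
}

int main() {
    // e.g. a 44.1 kHz track feeding a 960-frame 48 kHz mix buffer at 2x speed:
    // 960 * 44100/48000 + 2 = 884 source frames, times 2.0 plus 2 = 1770 frames.
    printf("%zu\n", sourceFramesNeededWithTimestretch(44100, 960, 48000, 2.0f));
    return 0;
}
```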
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index e7e0703..a06197f 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -359,6 +359,21 @@
     /* Return current source sample rate in Hz */
             uint32_t    getSampleRate() const;
 
+    /* Set source playback rate for timestretch
+     * 1.0 is normal speed: < 1.0 is slower, > 1.0 is faster
+     * 1.0 is normal pitch: < 1.0 is lower pitch, > 1.0 is higher pitch
+     *
+     * AUDIO_TIMESTRETCH_SPEED_MIN <= speed <= AUDIO_TIMESTRETCH_SPEED_MAX
+     * AUDIO_TIMESTRETCH_PITCH_MIN <= pitch <= AUDIO_TIMESTRETCH_PITCH_MAX
+     *
+     * Speed scales the playback rate of the media, but does not alter pitch.
+     * Pitch shifts the "tonal frequency" of the media, but does not affect the playback rate.
+     */
+            status_t    setPlaybackRate(float speed, float pitch);
+
+    /* Return current playback rate */
+            void        getPlaybackRate(float *speed, float *pitch) const;
+
     /* Enables looping and sets the start and end points of looping.
      * Only supported for static buffer mode.
      *
@@ -719,6 +734,9 @@
             // increment mPosition by the delta of mServer, and return new value of mPosition
             uint32_t updateAndGetPosition_l();
 
+            // check whether the sample rate and speed are compatible with the AudioTrack
+            bool     isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const;
+
     // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
@@ -730,6 +748,8 @@
     float                   mVolume[2];
     float                   mSendLevel;
     mutable uint32_t        mSampleRate;            // mutable because getSampleRate() can update it
+    float                   mSpeed;                 // timestretch: 1.0f for normal speed.
+    float                   mPitch;                 // timestretch: 1.0f for normal pitch.
     size_t                  mFrameCount;            // corresponds to current IAudioTrack, value is
                                                     // reported back by AudioFlinger to the client
     size_t                  mReqFrameCount;         // frame count to request the first or next time
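A caller-side sketch of the new AudioTrack API, assuming an already-created streaming track; the speed/pitch values and the log messages are illustrative only.

```cpp
#define LOG_TAG "TimestretchDemo"
#include <media/AudioTrack.h>
#include <utils/Log.h>

using namespace android;

void demoTimestretch(const sp<AudioTrack>& track) {
    // Play at 1.5x speed while keeping the original pitch.
    status_t status = track->setPlaybackRate(1.5f /* speed */, 1.0f /* pitch */);
    if (status != NO_ERROR) {
        // BAD_VALUE: out of range, or the track buffer is too small for this speed.
        // INVALID_OPERATION: timed, offloaded/direct, or fast tracks.
        ALOGW("setPlaybackRate() failed, status %d", status);
        return;
    }
    float speed, pitch;
    track->getPlaybackRate(&speed, &pitch);
    ALOGV("playback rate now speed %f, pitch %f", speed, pitch);
}
```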
diff --git a/include/media/stagefright/MediaClock.h b/include/media/stagefright/MediaClock.h
index e9c09a1..dd1a809 100644
--- a/include/media/stagefright/MediaClock.h
+++ b/include/media/stagefright/MediaClock.h
@@ -42,6 +42,7 @@
     void updateMaxTimeMedia(int64_t maxTimeMediaUs);
 
     void setPlaybackRate(float rate);
+    float getPlaybackRate() const;
 
     // query media time corresponding to real time |realUs|, and save the
     // result in |outMediaUs|.
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 5644428..6cc2e2b 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -25,6 +25,7 @@
 #include <utils/Log.h>
 #include <utils/RefBase.h>
 #include <audio_utils/roundup.h>
+#include <media/AudioResamplerPublic.h>
 #include <media/SingleStateQueue.h>
 
 namespace android {
@@ -113,6 +114,14 @@
                     mPosLoopQueue;
 };
 
+
+struct AudioTrackPlaybackRate {
+    float mSpeed;
+    float mPitch;
+};
+
+typedef SingleStateQueue<AudioTrackPlaybackRate> AudioTrackPlaybackRateQueue;
+
 // ----------------------------------------------------------------------------
 
 // Important: do not add any virtual methods, including ~
@@ -159,6 +168,8 @@
                 uint32_t    mSampleRate;    // AudioTrack only: client's requested sample rate in Hz
                                             // or 0 == default. Write-only client, read-only server.
 
+                AudioTrackPlaybackRateQueue::Shared mPlaybackRateQueue;
+
                 // client write-only, server read-only
                 uint16_t    mSendLevel;      // Fixed point U4.12 so 0x1000 means 1.0
 
@@ -313,7 +324,8 @@
     AudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
             size_t frameSize, bool clientInServer = false)
         : ClientProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/,
-          clientInServer) { }
+          clientInServer),
+          mPlaybackRateMutator(&cblk->mPlaybackRateQueue) { }
     virtual ~AudioTrackClientProxy() { }
 
     // No barriers on the following operations, so the ordering of loads/stores
@@ -333,6 +345,13 @@
         mCblk->mSampleRate = sampleRate;
     }
 
+    void        setPlaybackRate(float speed, float pitch) {
+        AudioTrackPlaybackRate playbackRate;
+        playbackRate.mSpeed = speed;
+        playbackRate.mPitch = pitch;
+        mPlaybackRateMutator.push(playbackRate);
+    }
+
     virtual void flush();
 
     virtual uint32_t    getUnderrunFrames() const {
@@ -344,6 +363,9 @@
     bool        getStreamEndDone() const;
 
     status_t    waitStreamEndDone(const struct timespec *requested);
+
+private:
+    AudioTrackPlaybackRateQueue::Mutator   mPlaybackRateMutator;
 };
 
 class StaticAudioTrackClientProxy : public AudioTrackClientProxy {
@@ -458,8 +480,11 @@
 public:
     AudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
             size_t frameSize, bool clientInServer = false, uint32_t sampleRate = 0)
-        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer) {
+        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer),
+          mPlaybackRateObserver(&cblk->mPlaybackRateQueue) {
         mCblk->mSampleRate = sampleRate;
+        mPlaybackRate.mSpeed = AUDIO_TIMESTRETCH_SPEED_NORMAL;
+        mPlaybackRate.mPitch = AUDIO_TIMESTRETCH_PITCH_NORMAL;
     }
 protected:
     virtual ~AudioTrackServerProxy() { }
@@ -493,6 +518,13 @@
 
     // Return the total number of frames that AudioFlinger has obtained and released
     virtual size_t      framesReleased() const { return mCblk->mServer; }
+
+    // Return the playback speed and pitch read atomically. Not multi-thread safe on server side.
+    void                getPlaybackRate(float *speed, float *pitch);
+
+private:
+    AudioTrackPlaybackRate                  mPlaybackRate;  // last observed playback rate
+    AudioTrackPlaybackRateQueue::Observer   mPlaybackRateObserver;
 };
 
 class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
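The speed/pitch pair travels from client to server through a SingleStateQueue embedded in the shared cblk: AudioTrackClientProxy pushes with a Mutator and AudioTrackServerProxy polls with an Observer, keeping the last value it observed. Below is a minimal sketch of that "latest value wins" mailbox pattern, using std::atomic in place of the real SingleStateQueue template; it illustrates the idea only, not the actual implementation.

```cpp
#include <atomic>
#include <cstdio>

struct PlaybackRate { float speed; float pitch; };   // stands in for AudioTrackPlaybackRate

class PlaybackRateMailbox {
public:
    // Client proxy side (Mutator role): publish the newest value, overwriting any unread one.
    void push(PlaybackRate value) {
        mValue.store(value, std::memory_order_release);
        mGeneration.fetch_add(1, std::memory_order_release);
    }
    // Server proxy side (Observer role): true if a new value arrived since the last poll.
    bool poll(PlaybackRate* out) {
        unsigned gen = mGeneration.load(std::memory_order_acquire);
        if (gen == mSeen) {
            return false;                             // nothing new, keep last observed value
        }
        *out = mValue.load(std::memory_order_acquire);
        mSeen = gen;
        return true;
    }
private:
    std::atomic<PlaybackRate> mValue { PlaybackRate{1.0f, 1.0f} };
    std::atomic<unsigned> mGeneration {0};
    unsigned mSeen = 0;                               // server-side state, not shared
};

int main() {
    PlaybackRateMailbox q;
    q.push(PlaybackRate{1.5f, 1.0f});
    PlaybackRate r;
    if (q.poll(&r)) {
        printf("speed %f pitch %f\n", r.speed, r.pitch);
    }
    return 0;
}
```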
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 9e9ec5b..89138e2 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -56,6 +56,24 @@
     return convertTimespecToUs(tv);
 }
 
+// Must match similar computation in createTrack_l in Threads.cpp.
+// TODO: Move to a common library
+static size_t calculateMinFrameCount(
+        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
+        uint32_t sampleRate, float speed)
+{
+    // Ensure that buffer depth covers at least audio hardware latency
+    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
+    if (minBufCount < 2) {
+        minBufCount = 2;
+    }
+    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
+            "sampleRate %u  speed %f  minBufCount: %u",
+            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount);
+    return minBufCount * sourceFramesNeededWithTimestretch(
+            sampleRate, afFrameCount, afSampleRate, speed);
+}
+
 // static
 status_t AudioTrack::getMinFrameCount(
         size_t* frameCount,
@@ -94,13 +112,10 @@
         return status;
     }
 
-    // Ensure that buffer depth covers at least audio hardware latency
-    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
-    if (minBufCount < 2) {
-        minBufCount = 2;
-    }
+    // When called from createTrack, speed is 1.0f (normal speed).
+    // The requirement is rechecked when the playback rate is set (TODO: on sample rate changes, too).
+    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f);
 
-    *frameCount = minBufCount * sourceFramesNeeded(sampleRate, afFrameCount, afSampleRate);
     // The formula above should always produce a non-zero value under normal circumstances:
     // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
     // Return error in the unlikely event that it does not, as that's part of the API contract.
@@ -109,8 +124,8 @@
                 streamType, sampleRate);
         return BAD_VALUE;
     }
-    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%u, afSampleRate=%u, afLatency=%u",
-            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
+    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
+            *frameCount, afFrameCount, afSampleRate, afLatency);
     return NO_ERROR;
 }
 
@@ -360,6 +375,8 @@
         return BAD_VALUE;
     }
     mSampleRate = sampleRate;
+    mSpeed = AUDIO_TIMESTRETCH_SPEED_NORMAL;
+    mPitch = AUDIO_TIMESTRETCH_PITCH_NORMAL;
 
     // Make copy of input parameter offloadInfo so that in the future:
     //  (a) createTrack_l doesn't need it as an input parameter
@@ -689,6 +706,7 @@
     if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
         return BAD_VALUE;
     }
+    // TODO: Should we also check if the buffer size is compatible?
 
     mSampleRate = rate;
     mProxy->setSampleRate(rate);
@@ -719,6 +737,42 @@
     return mSampleRate;
 }
 
+status_t AudioTrack::setPlaybackRate(float speed, float pitch)
+{
+    if (speed < AUDIO_TIMESTRETCH_SPEED_MIN
+            || speed > AUDIO_TIMESTRETCH_SPEED_MAX
+            || pitch < AUDIO_TIMESTRETCH_PITCH_MIN
+            || pitch > AUDIO_TIMESTRETCH_PITCH_MAX) {
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    if (speed == mSpeed && pitch == mPitch) {
+        return NO_ERROR;
+    }
+    if (mIsTimed || isOffloadedOrDirect_l()) {
+        return INVALID_OPERATION;
+    }
+    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
+        return INVALID_OPERATION;
+    }
+    // Check whether the current buffer size is large enough for the requested speed.
+    if (!isSampleRateSpeedAllowed_l(mSampleRate, speed)) {
+        ALOGV("setPlaybackRate(%f, %f) failed", speed, pitch);
+        return BAD_VALUE;
+    }
+    mSpeed = speed;
+    mPitch = pitch;
+    mProxy->setPlaybackRate(speed, pitch);
+    return NO_ERROR;
+}
+
+void AudioTrack::getPlaybackRate(float *speed, float *pitch) const
+{
+    AutoMutex lock(mLock);
+    *speed = mSpeed;
+    *pitch = mPitch;
+}
+
 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
 {
     if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
@@ -1086,8 +1140,16 @@
         // there _is_ a frameCount parameter.  We silently ignore it.
         frameCount = mSharedBuffer->size() / mFrameSize;
     } else {
-        // For fast and normal streaming tracks,
-        // the frame count calculations and checks are done by server
+        // For fast tracks, the frame count calculations and checks are done by the server.
+
+        if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
+            // For normal tracks, precompute the frame count based on speed.
+            const size_t minFrameCount = calculateMinFrameCount(
+                    afLatency, afFrameCount, afSampleRate, mSampleRate, mSpeed);
+            if (frameCount < minFrameCount) {
+                frameCount = minFrameCount;
+            }
+        }
     }
 
     IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
@@ -1230,6 +1292,7 @@
     }
 
     mAudioTrack->attachAuxEffect(mAuxEffectId);
+    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
     // FIXME don't believe this lie
     mLatency = afLatency + (1000*frameCount) / mSampleRate;
 
@@ -1255,6 +1318,7 @@
 
     mProxy->setSendLevel(mSendLevel);
     mProxy->setSampleRate(mSampleRate);
+    mProxy->setPlaybackRate(mSpeed, mPitch);
     mProxy->setMinimum(mNotificationFramesAct);
 
     mDeathNotifier = new DeathNotifier(this);
@@ -1617,6 +1681,7 @@
 
     // Cache other fields that will be needed soon
     uint32_t sampleRate = mSampleRate;
+    float speed = mSpeed;
     uint32_t notificationFrames = mNotificationFramesAct;
     if (mRefreshRemaining) {
         mRefreshRemaining = false;
@@ -1745,7 +1810,7 @@
     if (minFrames != (uint32_t) ~0) {
         // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
         static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
-        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
+        ns = ((double)minFrames * 1000000000) / ((double)sampleRate * speed) + kFudgeNs;
     }
 
     // If not supplying data by EVENT_MORE_DATA, then we're done
@@ -1786,7 +1851,8 @@
         if (mRetryOnPartialBuffer && !isOffloaded()) {
             mRetryOnPartialBuffer = false;
             if (avail < mRemainingFrames) {
-                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
+                int64_t myns = ((double)(mRemainingFrames - avail) * 1100000000)
+                        / ((double)sampleRate * speed);
                 if (ns < 0 || myns < ns) {
                     ns = myns;
                 }
@@ -1841,7 +1907,7 @@
         // that total to a sum == notificationFrames.
         if (0 < misalignment && misalignment <= mRemainingFrames) {
             mRemainingFrames = misalignment;
-            return (mRemainingFrames * 1100000000LL) / sampleRate;
+            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
         }
 #endif
 
@@ -1936,6 +2002,41 @@
     return mPosition += (uint32_t) delta;
 }
 
+bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
+{
+    // applicable for mixing tracks only (not offloaded or direct)
+    if (mStaticProxy != 0) {
+        return true; // static tracks do not have issues with buffer sizing.
+    }
+    status_t status;
+    uint32_t afLatency;
+    status = AudioSystem::getLatency(mOutput, &afLatency);
+    if (status != NO_ERROR) {
+        ALOGE("getLatency(%d) failed status %d", mOutput, status);
+        return false;
+    }
+
+    size_t afFrameCount;
+    status = AudioSystem::getFrameCount(mOutput, &afFrameCount);
+    if (status != NO_ERROR) {
+        ALOGE("getFrameCount(output=%d) status %d", mOutput, status);
+        return false;
+    }
+
+    uint32_t afSampleRate;
+    status = AudioSystem::getSamplingRate(mOutput, &afSampleRate);
+    if (status != NO_ERROR) {
+        ALOGE("getSamplingRate(output=%d) status %d", mOutput, status);
+        return false;
+    }
+
+    const size_t minFrameCount =
+            calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, speed);
+    ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu  minFrameCount %zu",
+            mFrameCount, minFrameCount);
+    return mFrameCount >= minFrameCount;
+}
+
 status_t AudioTrack::setParameters(const String8& keyValuePairs)
 {
     AutoMutex lock(mLock);
@@ -2001,7 +2102,8 @@
                     return WOULD_BLOCK;  // stale timestamp time, occurs before start.
                 }
                 const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
-                const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate;
+                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
+                        / ((double)mSampleRate * mSpeed);
 
                 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
                     // Verify that the counter can't count faster than the sample rate
@@ -2088,7 +2190,8 @@
     snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
             mChannelCount, mFrameCount);
     result.append(buffer);
-    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
+    snprintf(buffer, 255, "  sample rate(%u), speed(%f), status(%d)\n",
+            mSampleRate, mSpeed, mStatus);
     result.append(buffer);
     snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
     result.append(buffer);
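calculateMinFrameCount() guarantees enough client buffering to cover the hardware latency, then scales the per-buffer source requirement by the playback speed. A worked example with illustrative hardware parameters follows; the helper is inlined in simplified form (see AudioResamplerPublic.h for the real one).

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified inline of sourceFramesNeededWithTimestretch() for illustration.
static size_t sourceFramesNeededWithTimestretch(
        uint32_t srcRate, size_t dstFrames, uint32_t dstRate, float speed) {
    size_t required = (size_t)((uint64_t)dstFrames * srcRate / dstRate + 1 + 1);
    return required * (double)speed + 1 + 1;
}

int main() {
    // Hypothetical output: 48 kHz, 960-frame HAL buffers, 40 ms latency;
    // client track at 44.1 kHz playing at 2x speed.
    const uint32_t afLatencyMs = 40, afFrameCount = 960, afSampleRate = 48000;
    const uint32_t sampleRate = 44100;
    const float speed = 2.0f;

    // Buffer depth must cover at least the hardware latency (and at least 2 buffers).
    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate); // 40 / 20 = 2
    if (minBufCount < 2) {
        minBufCount = 2;
    }
    const size_t minFrameCount = minBufCount *
            sourceFramesNeededWithTimestretch(sampleRate, afFrameCount, afSampleRate, speed);
    printf("minFrameCount = %zu\n", minFrameCount);   // 2 * 1770 = 3540 source frames
    return 0;
}
```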
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 6d5f1af..ba67b40 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -793,6 +793,16 @@
     (void) android_atomic_or(CBLK_UNDERRUN, &cblk->mFlags);
 }
 
+void AudioTrackServerProxy::getPlaybackRate(float *speed, float *pitch)
+{   // do not call from multiple threads without holding a lock
+    AudioTrackPlaybackRate playbackRate;
+    if (mPlaybackRateObserver.poll(playbackRate)) {
+        mPlaybackRate = playbackRate;
+    }
+    *speed = mPlaybackRate.mSpeed;
+    *pitch = mPlaybackRate.mPitch;
+}
+
 // ---------------------------------------------------------------------------
 
 StaticAudioTrackServerProxy::StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers,
diff --git a/media/libstagefright/ESDS.cpp b/media/libstagefright/ESDS.cpp
index 427bf7b..8fbb57c 100644
--- a/media/libstagefright/ESDS.cpp
+++ b/media/libstagefright/ESDS.cpp
@@ -136,6 +136,8 @@
     --size;
 
     if (streamDependenceFlag) {
+        if (size < 2)
+            return ERROR_MALFORMED;
         offset += 2;
         size -= 2;
     }
@@ -145,11 +147,15 @@
             return ERROR_MALFORMED;
         }
         unsigned URLlength = mData[offset];
+        if (URLlength >= size)
+            return ERROR_MALFORMED;
         offset += URLlength + 1;
         size -= URLlength + 1;
     }
 
     if (OCRstreamFlag) {
+        if (size < 2)
+            return ERROR_MALFORMED;
         offset += 2;
         size -= 2;
 
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 910ae32..f7fa2b6 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -874,6 +874,9 @@
                     }
                 }
 
+                if (mLastTrack == NULL)
+                    return ERROR_MALFORMED;
+
                 mLastTrack->sampleTable = new SampleTable(mDataSource);
             }
 
@@ -1028,6 +1031,10 @@
             }
             original_fourcc = ntohl(original_fourcc);
             ALOGV("read original format: %d", original_fourcc);
+
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(original_fourcc));
             uint32_t num_channels = 0;
             uint32_t sample_rate = 0;
@@ -1083,6 +1090,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setInt32(kKeyCryptoMode, defaultAlgorithmId);
             mLastTrack->meta->setInt32(kKeyCryptoDefaultIVSize, defaultIVSize);
             mLastTrack->meta->setData(kKeyCryptoKey, 'tenc', defaultKeyId, 16);
@@ -1198,7 +1208,7 @@
                     duration = ntohl(duration32);
                 }
             }
-            if (duration != 0) {
+            if (duration != 0 && mLastTrack->timescale != 0) {
                 mLastTrack->meta->setInt64(
                         kKeyDuration, (duration * 1000000) / mLastTrack->timescale);
             }
@@ -1262,6 +1272,10 @@
                 // display the timed text.
                 // For encrypted files, there may also be more than one entry.
                 const char *mime;
+
+                if (mLastTrack == NULL)
+                    return ERROR_MALFORMED;
+
                 CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
                 if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) &&
                         strcasecmp(mime, "application/octet-stream")) {
@@ -1308,6 +1322,9 @@
             uint16_t sample_size = U16_AT(&buffer[18]);
             uint32_t sample_rate = U32_AT(&buffer[24]) >> 16;
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             if (chunk_type != FOURCC('e', 'n', 'c', 'a')) {
                 // if the chunk type is enca, we'll get the type from the sinf/frma box later
                 mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type));
@@ -1369,6 +1386,9 @@
             // printf("*** coding='%s' width=%d height=%d\n",
             //        chunk, width, height);
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             if (chunk_type != FOURCC('e', 'n', 'c', 'v')) {
                 // if the chunk type is encv, we'll get the type from the sinf/frma box later
                 mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type));
@@ -1394,6 +1414,9 @@
         case FOURCC('s', 't', 'c', 'o'):
         case FOURCC('c', 'o', '6', '4'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             status_t err =
                 mLastTrack->sampleTable->setChunkOffsetParams(
                         chunk_type, data_offset, chunk_data_size);
@@ -1409,6 +1432,9 @@
 
         case FOURCC('s', 't', 's', 'c'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             status_t err =
                 mLastTrack->sampleTable->setSampleToChunkParams(
                         data_offset, chunk_data_size);
@@ -1425,6 +1451,9 @@
         case FOURCC('s', 't', 's', 'z'):
         case FOURCC('s', 't', 'z', '2'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             status_t err =
                 mLastTrack->sampleTable->setSampleSizeParams(
                         chunk_type, data_offset, chunk_data_size);
@@ -1494,6 +1523,9 @@
 
         case FOURCC('s', 't', 't', 's'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             *offset += chunk_size;
 
             status_t err =
@@ -1509,6 +1541,9 @@
 
         case FOURCC('c', 't', 't', 's'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             *offset += chunk_size;
 
             status_t err =
@@ -1524,6 +1559,9 @@
 
         case FOURCC('s', 't', 's', 's'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             *offset += chunk_size;
 
             status_t err =
@@ -1596,6 +1634,9 @@
                 return ERROR_MALFORMED;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(
                     kKeyESDS, kTypeESDS, &buffer[4], chunk_data_size - 4);
 
@@ -1628,6 +1669,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(
                     kKeyAVCC, kTypeAVCC, buffer->data(), chunk_data_size);
 
@@ -1642,6 +1686,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(
                     kKeyHVCC, kTypeHVCC, buffer->data(), chunk_data_size);
 
@@ -1675,6 +1722,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(kKeyD263, kTypeD263, buffer, chunk_data_size);
 
             break;
@@ -1772,7 +1822,7 @@
                 }
                 duration = d32;
             }
-            if (duration != 0) {
+            if (duration != 0 && mHeaderTimescale != 0) {
                 mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
             }
 
@@ -1821,7 +1871,7 @@
                 return ERROR_MALFORMED;
             }
 
-            if (duration != 0) {
+            if (duration != 0 && mHeaderTimescale != 0) {
                 mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
             }
 
@@ -1856,6 +1906,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             uint32_t type = ntohl(buffer);
             // For the 3GPP file format, the handler-type within the 'hdlr' box
             // shall be 'text'. We also want to support 'sbtl' handler type
@@ -1888,6 +1941,9 @@
 
         case FOURCC('t', 'x', '3', 'g'):
         {
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             uint32_t type;
             const void *data;
             size_t size = 0;
@@ -2029,6 +2085,8 @@
         return ERROR_MALFORMED;
     }
     ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale);
+    if (timeScale == 0)
+        return ERROR_MALFORMED;
 
     uint64_t earliestPresentationTime;
     uint64_t firstOffset;
@@ -2112,6 +2170,9 @@
 
     uint64_t sidxDuration = total_duration * 1000000 / timeScale;
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     int64_t metaDuration;
     if (!mLastTrack->meta->findInt64(kKeyDuration, &metaDuration) || metaDuration == 0) {
         mLastTrack->meta->setInt64(kKeyDuration, sidxDuration);
@@ -2162,6 +2223,9 @@
         return ERROR_UNSUPPORTED;
     }
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     mLastTrack->meta->setInt32(kKeyTrackID, id);
 
     size_t matrixOffset = dynSize + 16;
@@ -2344,6 +2408,9 @@
                     int32_t delay, padding;
                     if (sscanf(mLastCommentData,
                                " %*x %x %x %*x", &delay, &padding) == 2) {
+                        if (mLastTrack == NULL)
+                            return ERROR_MALFORMED;
+
                         mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
                         mLastTrack->meta->setInt32(kKeyEncoderPadding, padding);
                     }
@@ -2711,6 +2778,9 @@
 
     if (objectTypeIndication == 0xe1) {
         // This isn't MPEG4 audio at all, it's QCELP 14k...
+        if (mLastTrack == NULL)
+            return ERROR_MALFORMED;
+
         mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_QCELP);
         return OK;
     }
@@ -2759,6 +2829,9 @@
         objectType = 32 + br.getBits(6);
     }
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     //keep AOT type
     mLastTrack->meta->setInt32(kKeyAACAOT, objectType);
 
@@ -2929,6 +3002,9 @@
         return ERROR_UNSUPPORTED;
     }
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     int32_t prevSampleRate;
     CHECK(mLastTrack->meta->findInt32(kKeySampleRate, &prevSampleRate));
 
diff --git a/media/libstagefright/MediaClock.cpp b/media/libstagefright/MediaClock.cpp
index 433f555..2641e4e 100644
--- a/media/libstagefright/MediaClock.cpp
+++ b/media/libstagefright/MediaClock.cpp
@@ -92,6 +92,11 @@
     mPlaybackRate = rate;
 }
 
+float MediaClock::getPlaybackRate() const {
+    Mutex::Autolock autoLock(mLock);
+    return mPlaybackRate;
+}
+
 status_t MediaClock::getMediaTime(
         int64_t realUs, int64_t *outMediaUs, bool allowPastMaxTime) const {
     if (outMediaUs == NULL) {
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index bdd6d56..aba64d5 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -230,8 +230,13 @@
         return ERROR_MALFORMED;
     }
 
+    if (SIZE_MAX / sizeof(SampleToChunkEntry) <= mNumSampleToChunkOffsets)
+        return ERROR_OUT_OF_RANGE;
+
     mSampleToChunkEntries =
-        new SampleToChunkEntry[mNumSampleToChunkOffsets];
+        new (std::nothrow) SampleToChunkEntry[mNumSampleToChunkOffsets];
+    if (!mSampleToChunkEntries)
+        return ERROR_OUT_OF_RANGE;
 
     for (uint32_t i = 0; i < mNumSampleToChunkOffsets; ++i) {
         uint8_t buffer[12];
@@ -330,11 +335,13 @@
     }
 
     mTimeToSampleCount = U32_AT(&header[4]);
-    uint64_t allocSize = mTimeToSampleCount * 2 * sizeof(uint32_t);
+    uint64_t allocSize = mTimeToSampleCount * 2 * (uint64_t)sizeof(uint32_t);
     if (allocSize > SIZE_MAX) {
         return ERROR_OUT_OF_RANGE;
     }
-    mTimeToSample = new uint32_t[mTimeToSampleCount * 2];
+    mTimeToSample = new (std::nothrow) uint32_t[mTimeToSampleCount * 2];
+    if (!mTimeToSample)
+        return ERROR_OUT_OF_RANGE;
 
     size_t size = sizeof(uint32_t) * mTimeToSampleCount * 2;
     if (mDataSource->readAt(
@@ -376,12 +383,14 @@
     }
 
     mNumCompositionTimeDeltaEntries = numEntries;
-    uint64_t allocSize = numEntries * 2 * sizeof(uint32_t);
+    uint64_t allocSize = numEntries * 2 * (uint64_t)sizeof(uint32_t);
     if (allocSize > SIZE_MAX) {
         return ERROR_OUT_OF_RANGE;
     }
 
-    mCompositionTimeDeltaEntries = new uint32_t[2 * numEntries];
+    mCompositionTimeDeltaEntries = new (std::nothrow) uint32_t[2 * numEntries];
+    if (!mCompositionTimeDeltaEntries)
+        return ERROR_OUT_OF_RANGE;
 
     if (mDataSource->readAt(
                 data_offset + 8, mCompositionTimeDeltaEntries, numEntries * 8)
@@ -426,12 +435,15 @@
         ALOGV("Table of sync samples is empty or has only a single entry!");
     }
 
-    uint64_t allocSize = mNumSyncSamples * sizeof(uint32_t);
+    uint64_t allocSize = mNumSyncSamples * (uint64_t)sizeof(uint32_t);
     if (allocSize > SIZE_MAX) {
         return ERROR_OUT_OF_RANGE;
     }
 
-    mSyncSamples = new uint32_t[mNumSyncSamples];
+    mSyncSamples = new (std::nothrow) uint32_t[mNumSyncSamples];
+    if (!mSyncSamples)
+        return ERROR_OUT_OF_RANGE;
+
     size_t size = mNumSyncSamples * sizeof(uint32_t);
     if (mDataSource->readAt(mSyncSampleOffset + 8, mSyncSamples, size)
             != (ssize_t)size) {
@@ -499,7 +511,9 @@
         return;
     }
 
-    mSampleTimeEntries = new SampleTimeEntry[mNumSampleSizes];
+    mSampleTimeEntries = new (std::nothrow) SampleTimeEntry[mNumSampleSizes];
+    if (!mSampleTimeEntries)
+        return;
 
     uint32_t sampleIndex = 0;
     uint32_t sampleTime = 0;
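The SampleTable changes pair a widened, explicitly checked allocation size with new (std::nothrow), so an absurd entry count from a corrupt file fails with ERROR_OUT_OF_RANGE instead of overflowing or aborting. A minimal sketch of the pattern; the entry layout and error codes are illustrative.

```cpp
#include <cstddef>
#include <cstdint>
#include <new>

enum { OK = 0, ERROR_OUT_OF_RANGE = -2 };

static int allocEntries(uint32_t numEntries, uint32_t** outTable) {
    // Widen before multiplying so the size computation itself cannot overflow.
    uint64_t allocSize = (uint64_t)numEntries * 2 * sizeof(uint32_t);
    if (allocSize > SIZE_MAX) {
        return ERROR_OUT_OF_RANGE;          // would overflow a size_t on 32-bit builds
    }
    uint32_t* table = new (std::nothrow) uint32_t[(size_t)numEntries * 2];
    if (table == NULL) {
        return ERROR_OUT_OF_RANGE;          // allocation failure, no abort
    }
    *outTable = table;
    return OK;
}
```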
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 26f8da1..74f58e9 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -1455,6 +1455,10 @@
     if (bandwidthIndex >= 0) {
         mOrigBandwidthIndex = mCurBandwidthIndex;
         mCurBandwidthIndex = bandwidthIndex;
+        if (mOrigBandwidthIndex != mCurBandwidthIndex) {
+            ALOGI("#### Starting Bandwidth Switch: %zd => %zd",
+                    mOrigBandwidthIndex, mCurBandwidthIndex);
+        }
     }
     CHECK_LT(mCurBandwidthIndex, mBandwidthItems.size());
     const BandwidthItem &item = mBandwidthItems.itemAt(mCurBandwidthIndex);
@@ -1574,6 +1578,7 @@
 
     if (timeUs >= 0) {
         mLastSeekTimeUs = timeUs;
+        mLastDequeuedTimeUs = timeUs;
 
         for (size_t i = 0; i < mPacketSources.size(); i++) {
             mPacketSources.editValueAt(i)->clear();
@@ -1626,8 +1631,10 @@
             ALOGV("stream %zu changed: oldURI %s, newURI %s", i,
                     mStreams[i].mUri.c_str(), URIs[i].c_str());
             sp<AnotherPacketSource> source = mPacketSources.valueFor(indexToType(i));
-            source->queueDiscontinuity(
-                    ATSParser::DISCONTINUITY_FORMATCHANGE, NULL, true);
+            if (source->getLatestDequeuedMeta() != NULL) {
+                source->queueDiscontinuity(
+                        ATSParser::DISCONTINUITY_FORMATCHANGE, NULL, true);
+            }
         }
         // Determine which decoders to shutdown on the player side,
         // a decoder has to be shutdown if its streamtype was active
@@ -1687,10 +1694,6 @@
             // and resume audio.
             mSwapMask =  mNewStreamMask & mStreamMask & ~resumeMask;
             switching = (mSwapMask != 0);
-            if (!switching) {
-                ALOGV("#### Finishing Bandwidth Switch Early: %zd => %zd",
-                        mOrigBandwidthIndex, mCurBandwidthIndex);
-            }
         }
         mRealTimeBaseUs = ALooper::GetNowUs() - mLastDequeuedTimeUs;
     } else {
@@ -1843,7 +1846,11 @@
         mSwitchInProgress = true;
     } else {
         mStreamMask = mNewStreamMask;
-        mOrigBandwidthIndex = mCurBandwidthIndex;
+        if (mOrigBandwidthIndex != mCurBandwidthIndex) {
+            ALOGV("#### Finished Bandwidth Switch Early: %zd => %zd",
+                    mOrigBandwidthIndex, mCurBandwidthIndex);
+            mOrigBandwidthIndex = mCurBandwidthIndex;
+        }
     }
 
     ALOGV("onChangeConfiguration3: mSwitchInProgress %d, mStreamMask 0x%x",
@@ -1970,11 +1977,19 @@
 
     bool underflow, ready, down, up;
     if (checkBuffering(underflow, ready, down, up)) {
-        if (mInPreparationPhase && ready) {
-            postPrepared(OK);
+        if (mInPreparationPhase) {
+            // Allow down switch even if we're still preparing.
+            //
+            // Some streams default to a high bandwidth index. When the
+            // actual bandwidth is low, buffering to the ready mark takes
+            // a long time, and playback pauses right after start because
+            // we then have to switch down. If we detect low bandwidth,
+            // it is a better experience to restart from a lower index.
+            if (!switchBandwidthIfNeeded(false /* up */, down) && ready) {
+                postPrepared(OK);
+            }
         }
 
-        // don't switch before we report prepared
         if (!mInPreparationPhase) {
             if (ready) {
                 stopBufferingIfNecessary();
@@ -1982,8 +1997,7 @@
                 startBufferingIfNecessary();
             }
             switchBandwidthIfNeeded(up, down);
-       }
-
+        }
     }
 
     schedulePollBuffering();
@@ -2075,7 +2089,8 @@
             if (mPacketSources[i]->isFinished(0 /* duration */)) {
                 percent = 100;
             } else {
-                percent = (int32_t)(100.0 * (mLastDequeuedTimeUs + bufferedDurationUs) / durationUs);
+                percent = (int32_t)(100.0 *
+                        (mLastDequeuedTimeUs + bufferedDurationUs) / durationUs);
             }
             if (minBufferPercent < 0 || percent < minBufferPercent) {
                 minBufferPercent = percent;
@@ -2158,10 +2173,14 @@
     notify->post();
 }
 
-void LiveSession::switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow) {
+/*
+ * returns true if a bandwidth switch is actually needed (and started),
+ * returns false otherwise
+ */
+bool LiveSession::switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow) {
     // no need to check bandwidth if we only have 1 bandwidth settings
     if (mSwitchInProgress || mBandwidthItems.size() < 2) {
-        return;
+        return false;
     }
 
     int32_t bandwidthBps;
@@ -2170,7 +2189,7 @@
         mLastBandwidthBps = bandwidthBps;
     } else {
         ALOGV("no bandwidth estimate.");
-        return;
+        return false;
     }
 
     int32_t curBandwidth = mBandwidthItems.itemAt(mCurBandwidthIndex).mBandwidth;
@@ -2189,16 +2208,16 @@
         // bandwidthIndex is < mCurBandwidthIndex, as getBandwidthIndex() only uses 70%
         // of measured bw. In that case we don't want to do anything, since we have
         // both enough buffer and enough bw.
-        if (bandwidthIndex == mCurBandwidthIndex
-                || (canSwitchUp && bandwidthIndex < mCurBandwidthIndex)
-                || (canSwithDown && bandwidthIndex > mCurBandwidthIndex)) {
-            return;
+        if ((canSwitchUp && bandwidthIndex > mCurBandwidthIndex)
+         || (canSwithDown && bandwidthIndex < mCurBandwidthIndex)) {
+            // if not yet prepared, just restart with the new bw index;
+            // this is faster and the playback experience is cleaner.
+            changeConfiguration(
+                    mInPreparationPhase ? 0 : -1ll, bandwidthIndex);
+            return true;
         }
-
-        ALOGI("#### Starting Bandwidth Switch: %zd => %zd",
-                mCurBandwidthIndex, bandwidthIndex);
-        changeConfiguration(-1, bandwidthIndex, false);
     }
+    return false;
 }
 
 void LiveSession::postError(status_t err) {
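With switchBandwidthIfNeeded() now returning whether a switch was started, the buffering poll can attempt a down-switch even during preparation and only report prepared when no switch happened. A condensed sketch of that decision flow, using placeholder stubs for the LiveSession members:

```cpp
#include <cstdio>

// Placeholder stubs standing in for the real LiveSession members.
static bool switchBandwidthIfNeeded(bool /*up*/, bool /*down*/) { return false; }
static void postPrepared()  { printf("prepared\n"); }
static void stopBuffering() { printf("stop buffering\n"); }
static void startBuffering(){ printf("start buffering\n"); }

static void onPollBuffering(bool inPreparationPhase, bool ready, bool down, bool up) {
    if (inPreparationPhase) {
        // A down-switch is allowed even before prepare completes; only report
        // prepared when no switch was started and the buffer reached the ready mark.
        if (!switchBandwidthIfNeeded(false /* up */, down) && ready) {
            postPrepared();
        }
        return;
    }
    if (ready) {
        stopBuffering();
    } else {
        startBuffering();
    }
    switchBandwidthIfNeeded(up, down);
}

int main() {
    onPollBuffering(true /* preparing */, true /* ready */, false, false);
    return 0;
}
```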
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index c587f40..9117bb1 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -292,7 +292,7 @@
     bool checkSwitchProgress(
             sp<AMessage> &msg, int64_t delayUs, bool *needResumeUntil);
 
-    void switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow);
+    bool switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow);
 
     void schedulePollBuffering();
     void cancelPollBuffering();
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 0676a33..c7912c0 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -355,10 +355,15 @@
     int64_t time2 = -1;
     int64_t durationUs = 0;
 
-    List<sp<ABuffer> >::iterator it = mBuffers.begin();
-    while (it != mBuffers.end()) {
+    List<sp<ABuffer> >::iterator it;
+    for (it = mBuffers.begin(); it != mBuffers.end(); it++) {
         const sp<ABuffer> &buffer = *it;
 
+        int32_t discard;
+        if (buffer->meta()->findInt32("discard", &discard) && discard) {
+            continue;
+        }
+
         int64_t timeUs;
         if (buffer->meta()->findInt64("timeUs", &timeUs)) {
             if (time1 < 0 || timeUs < time1) {
@@ -373,8 +378,6 @@
             durationUs += time2 - time1;
             time1 = time2 = -1;
         }
-
-        ++it;
     }
 
     return durationUs + (time2 - time1);
@@ -393,11 +396,19 @@
         return getBufferedDurationUs_l(&finalResult);
     }
 
-    List<sp<ABuffer> >::iterator it = mBuffers.begin();
-    sp<ABuffer> buffer = *it;
+    sp<ABuffer> buffer;
+    int32_t discard;
+    int64_t startTimeUs = -1ll;
+    List<sp<ABuffer> >::iterator it;
+    for (it = mBuffers.begin(); it != mBuffers.end(); it++) {
+        buffer = *it;
+        if (buffer->meta()->findInt32("discard", &discard) && discard) {
+            continue;
+        }
+        buffer->meta()->findInt64("timeUs", &startTimeUs);
+        break;
+    }
 
-    int64_t startTimeUs;
-    buffer->meta()->findInt64("timeUs", &startTimeUs);
     if (startTimeUs < 0) {
         return 0;
     }
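Both buffered-duration helpers now skip buffers whose meta carries the "discard" flag, so stale data queued before a seek no longer counts. A simplified sketch of the filter over a plain list, with ABuffer/AMessage meta replaced by a struct:

```cpp
#include <cstdint>
#include <list>

struct Entry {
    int64_t timeUs;
    bool discard;   // set on buffers queued before a seek/format change
};

// Returns the timestamp of the first non-discarded buffer, or -1 if none exists.
static int64_t firstValidTimeUs(const std::list<Entry>& buffers) {
    for (const Entry& e : buffers) {
        if (e.discard) {
            continue;           // stale data: ignore it, as the patch does
        }
        return e.timeUs;
    }
    return -1;
}
```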
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index fee2347..f8446ac 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -44,9 +44,9 @@
     SpdifStreamOut.cpp          \
     Effects.cpp                 \
     AudioMixer.cpp.arm          \
-    PatchPanel.cpp
-
-LOCAL_SRC_FILES += StateQueue.cpp
+    BufferProviders.cpp         \
+    PatchPanel.cpp              \
+    StateQueue.cpp
 
 LOCAL_C_INCLUDES := \
     $(TOPDIR)frameworks/av/services/audiopolicy \
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index 09d86ea..3191598 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -44,7 +44,7 @@
     AudioStreamOut *outputStream = new AudioStreamOut(this, flags);
 
     // Try to open the HAL first using the current format.
-    ALOGV("AudioHwDevice::openOutputStream(), try "
+    ALOGV("openOutputStream(), try "
             " sampleRate %d, Format %#x, "
             "channelMask %#x",
             config->sample_rate,
@@ -59,7 +59,7 @@
         // FIXME Look at any modification to the config.
         // The HAL might modify the config to suggest a wrapped format.
         // Log this so we can see what the HALs are doing.
-        ALOGI("AudioHwDevice::openOutputStream(), HAL returned"
+        ALOGI("openOutputStream(), HAL returned"
             " sampleRate %d, Format %#x, "
             "channelMask %#x, status %d",
             config->sample_rate,
@@ -72,16 +72,19 @@
                 && ((flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0)
                 && ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0);
 
-        // FIXME - Add isEncodingSupported() query to SPDIF wrapper then
-        // call it from here.
         if (wrapperNeeded) {
-            outputStream = new SpdifStreamOut(this, flags);
-            status = outputStream->open(handle, devices, &originalConfig, address);
-            if (status != NO_ERROR) {
-                ALOGE("ERROR - AudioHwDevice::openOutputStream(), SPDIF open returned %d",
-                    status);
-                delete outputStream;
-                outputStream = NULL;
+            if (SPDIFEncoder::isFormatSupported(originalConfig.format)) {
+                outputStream = new SpdifStreamOut(this, flags, originalConfig.format);
+                status = outputStream->open(handle, devices, &originalConfig, address);
+                if (status != NO_ERROR) {
+                    ALOGE("ERROR - openOutputStream(), SPDIF open returned %d",
+                        status);
+                    delete outputStream;
+                    outputStream = NULL;
+                }
+            } else {
+                ALOGE("ERROR - openOutputStream(), SPDIFEncoder does not support format 0x%08x",
+                    originalConfig.format);
             }
         }
     }
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index dddca02..c2c791f 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -38,9 +38,7 @@
 #include <audio_utils/format.h>
 #include <common_time/local_clock.h>
 #include <common_time/cc_helper.h>
-
-#include <media/EffectsFactoryApi.h>
-#include <audio_effects/effect_downmix.h>
+#include <media/AudioResamplerPublic.h>
 
 #include "AudioMixerOps.h"
 #include "AudioMixer.h"
@@ -91,323 +89,6 @@
     return a < b ? a : b;
 }
 
-AudioMixer::CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
-        size_t outputFrameSize, size_t bufferFrameCount) :
-        mInputFrameSize(inputFrameSize),
-        mOutputFrameSize(outputFrameSize),
-        mLocalBufferFrameCount(bufferFrameCount),
-        mLocalBufferData(NULL),
-        mConsumed(0)
-{
-    ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
-            inputFrameSize, outputFrameSize, bufferFrameCount);
-    LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
-            "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)",
-            inputFrameSize, outputFrameSize);
-    if (mLocalBufferFrameCount) {
-        (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
-    }
-    mBuffer.frameCount = 0;
-}
-
-AudioMixer::CopyBufferProvider::~CopyBufferProvider()
-{
-    ALOGV("~CopyBufferProvider(%p)", this);
-    if (mBuffer.frameCount != 0) {
-        mTrackBufferProvider->releaseBuffer(&mBuffer);
-    }
-    free(mLocalBufferData);
-}
-
-status_t AudioMixer::CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
-        int64_t pts)
-{
-    //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
-    //        this, pBuffer, pBuffer->frameCount, pts);
-    if (mLocalBufferFrameCount == 0) {
-        status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
-        if (res == OK) {
-            copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
-        }
-        return res;
-    }
-    if (mBuffer.frameCount == 0) {
-        mBuffer.frameCount = pBuffer->frameCount;
-        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
-        // At one time an upstream buffer provider had
-        // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014.
-        //
-        // By API spec, if res != OK, then mBuffer.frameCount == 0.
-        // but there may be improper implementations.
-        ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
-        if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
-            pBuffer->raw = NULL;
-            pBuffer->frameCount = 0;
-            return res;
-        }
-        mConsumed = 0;
-    }
-    ALOG_ASSERT(mConsumed < mBuffer.frameCount);
-    size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
-    count = min(count, pBuffer->frameCount);
-    pBuffer->raw = mLocalBufferData;
-    pBuffer->frameCount = count;
-    copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
-            pBuffer->frameCount);
-    return OK;
-}
-
-void AudioMixer::CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
-{
-    //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))",
-    //        this, pBuffer, pBuffer->frameCount);
-    if (mLocalBufferFrameCount == 0) {
-        mTrackBufferProvider->releaseBuffer(pBuffer);
-        return;
-    }
-    // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
-    mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content
-    if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) {
-        mTrackBufferProvider->releaseBuffer(&mBuffer);
-        ALOG_ASSERT(mBuffer.frameCount == 0);
-    }
-    pBuffer->raw = NULL;
-    pBuffer->frameCount = 0;
-}
-
-void AudioMixer::CopyBufferProvider::reset()
-{
-    if (mBuffer.frameCount != 0) {
-        mTrackBufferProvider->releaseBuffer(&mBuffer);
-    }
-    mConsumed = 0;
-}
-
-AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider(
-        audio_channel_mask_t inputChannelMask,
-        audio_channel_mask_t outputChannelMask, audio_format_t format,
-        uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
-        CopyBufferProvider(
-            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
-            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
-            bufferFrameCount)  // set bufferFrameCount to 0 to do in-place
-{
-    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
-            this, inputChannelMask, outputChannelMask, format,
-            sampleRate, sessionId);
-    if (!sIsMultichannelCapable
-            || EffectCreate(&sDwnmFxDesc.uuid,
-                    sessionId,
-                    SESSION_ID_INVALID_AND_IGNORED,
-                    &mDownmixHandle) != 0) {
-         ALOGE("DownmixerBufferProvider() error creating downmixer effect");
-         mDownmixHandle = NULL;
-         return;
-     }
-     // channel input configuration will be overridden per-track
-     mDownmixConfig.inputCfg.channels = inputChannelMask;   // FIXME: Should be bits
-     mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
-     mDownmixConfig.inputCfg.format = format;
-     mDownmixConfig.outputCfg.format = format;
-     mDownmixConfig.inputCfg.samplingRate = sampleRate;
-     mDownmixConfig.outputCfg.samplingRate = sampleRate;
-     mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
-     mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
-     // input and output buffer provider, and frame count will not be used as the downmix effect
-     // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
-     mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
-             EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
-     mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
-
-     int cmdStatus;
-     uint32_t replySize = sizeof(int);
-
-     // Configure downmixer
-     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
-             EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
-             &mDownmixConfig /*pCmdData*/,
-             &replySize, &cmdStatus /*pReplyData*/);
-     if (status != 0 || cmdStatus != 0) {
-         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
-                 status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
-         return;
-     }
-
-     // Enable downmixer
-     replySize = sizeof(int);
-     status = (*mDownmixHandle)->command(mDownmixHandle,
-             EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
-             &replySize, &cmdStatus /*pReplyData*/);
-     if (status != 0 || cmdStatus != 0) {
-         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
-                 status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
-         return;
-     }
-
-     // Set downmix type
-     // parameter size rounded for padding on 32bit boundary
-     const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
-     const int downmixParamSize =
-             sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
-     effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
-     param->psize = sizeof(downmix_params_t);
-     const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
-     memcpy(param->data, &downmixParam, param->psize);
-     const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
-     param->vsize = sizeof(downmix_type_t);
-     memcpy(param->data + psizePadded, &downmixType, param->vsize);
-     replySize = sizeof(int);
-     status = (*mDownmixHandle)->command(mDownmixHandle,
-             EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
-             param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
-     free(param);
-     if (status != 0 || cmdStatus != 0) {
-         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
-                 status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
-         return;
-     }
-     ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
-}
-
-AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider()
-{
-    ALOGV("~DownmixerBufferProvider (%p)", this);
-    EffectRelease(mDownmixHandle);
-    mDownmixHandle = NULL;
-}
-
-void AudioMixer::DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
-    mDownmixConfig.inputCfg.buffer.frameCount = frames;
-    mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src);
-    mDownmixConfig.outputCfg.buffer.frameCount = frames;
-    mDownmixConfig.outputCfg.buffer.raw = dst;
-    // may be in-place if src == dst.
-    status_t res = (*mDownmixHandle)->process(mDownmixHandle,
-            &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
-    ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
-}
-
-/* call once in a pthread_once handler. */
-/*static*/ status_t AudioMixer::DownmixerBufferProvider::init()
-{
-    // find multichannel downmix effect if we have to play multichannel content
-    uint32_t numEffects = 0;
-    int ret = EffectQueryNumberEffects(&numEffects);
-    if (ret != 0) {
-        ALOGE("AudioMixer() error %d querying number of effects", ret);
-        return NO_INIT;
-    }
-    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
-
-    for (uint32_t i = 0 ; i < numEffects ; i++) {
-        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
-            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
-            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
-                ALOGI("found effect \"%s\" from %s",
-                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
-                sIsMultichannelCapable = true;
-                break;
-            }
-        }
-    }
-    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
-    return NO_INIT;
-}
-
-/*static*/ bool AudioMixer::DownmixerBufferProvider::sIsMultichannelCapable = false;
-/*static*/ effect_descriptor_t AudioMixer::DownmixerBufferProvider::sDwnmFxDesc;
-
-AudioMixer::RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
-        audio_channel_mask_t outputChannelMask, audio_format_t format,
-        size_t bufferFrameCount) :
-        CopyBufferProvider(
-                audio_bytes_per_sample(format)
-                    * audio_channel_count_from_out_mask(inputChannelMask),
-                audio_bytes_per_sample(format)
-                    * audio_channel_count_from_out_mask(outputChannelMask),
-                bufferFrameCount),
-        mFormat(format),
-        mSampleSize(audio_bytes_per_sample(format)),
-        mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
-        mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
-{
-    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
-            this, format, inputChannelMask, outputChannelMask,
-            mInputChannels, mOutputChannels);
-
-    const audio_channel_representation_t inputRepresentation =
-            audio_channel_mask_get_representation(inputChannelMask);
-    const audio_channel_representation_t outputRepresentation =
-            audio_channel_mask_get_representation(outputChannelMask);
-    const uint32_t inputBits = audio_channel_mask_get_bits(inputChannelMask);
-    const uint32_t outputBits = audio_channel_mask_get_bits(outputChannelMask);
-
-    switch (inputRepresentation) {
-    case AUDIO_CHANNEL_REPRESENTATION_POSITION:
-        switch (outputRepresentation) {
-        case AUDIO_CHANNEL_REPRESENTATION_POSITION:
-            memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry),
-                    outputBits, inputBits);
-            return;
-        case AUDIO_CHANNEL_REPRESENTATION_INDEX:
-            // TODO: output channel index mask not currently allowed
-            // fall through
-        default:
-            break;
-        }
-        break;
-    case AUDIO_CHANNEL_REPRESENTATION_INDEX:
-        switch (outputRepresentation) {
-        case AUDIO_CHANNEL_REPRESENTATION_POSITION:
-            memcpy_by_index_array_initialization_src_index(mIdxAry, ARRAY_SIZE(mIdxAry),
-                    outputBits, inputBits);
-            return;
-        case AUDIO_CHANNEL_REPRESENTATION_INDEX:
-            // TODO: output channel index mask not currently allowed
-            // fall through
-        default:
-            break;
-        }
-        break;
-    default:
-        break;
-    }
-    LOG_ALWAYS_FATAL("invalid channel mask conversion from %#x to %#x",
-            inputChannelMask, outputChannelMask);
-}
-
-void AudioMixer::RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
-    memcpy_by_index_array(dst, mOutputChannels,
-            src, mInputChannels, mIdxAry, mSampleSize, frames);
-}
-
-AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels,
-        audio_format_t inputFormat, audio_format_t outputFormat,
-        size_t bufferFrameCount) :
-        CopyBufferProvider(
-            channels * audio_bytes_per_sample(inputFormat),
-            channels * audio_bytes_per_sample(outputFormat),
-            bufferFrameCount),
-        mChannels(channels),
-        mInputFormat(inputFormat),
-        mOutputFormat(outputFormat)
-{
-    ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat);
-}
-
-void AudioMixer::ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
-    memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannels);
-}
-
 // ----------------------------------------------------------------------------
 
 // Ensure mConfiguredNames bitmask is initialized properly on all architectures.
@@ -442,6 +123,7 @@
         t->resampler = NULL;
         t->downmixerBufferProvider = NULL;
         t->mReformatBufferProvider = NULL;
+        t->mTimestretchBufferProvider = NULL;
         t++;
     }
 
@@ -454,6 +136,7 @@
         delete t->resampler;
         delete t->downmixerBufferProvider;
         delete t->mReformatBufferProvider;
+        delete t->mTimestretchBufferProvider;
         t++;
     }
     delete [] mState.outputTemp;
@@ -532,6 +215,7 @@
         t->mReformatBufferProvider = NULL;
         t->downmixerBufferProvider = NULL;
         t->mPostDownmixReformatBufferProvider = NULL;
+        t->mTimestretchBufferProvider = NULL;
         t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
         t->mFormat = format;
         t->mMixerInFormat = selectMixerInFormat(format);
@@ -539,6 +223,8 @@
         t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
                 AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
         t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
+        t->mSpeed = AUDIO_TIMESTRETCH_SPEED_NORMAL;
+        t->mPitch = AUDIO_TIMESTRETCH_PITCH_NORMAL;
         // Check the downmixing (or upmixing) requirements.
         status_t status = t->prepareForDownmix();
         if (status != OK) {
@@ -731,6 +417,10 @@
         mPostDownmixReformatBufferProvider->setBufferProvider(bufferProvider);
         bufferProvider = mPostDownmixReformatBufferProvider;
     }
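+    // The timestretcher is attached last, so it receives data that has already been
+    // reformatted and, if necessary, downmixed.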
+    if (mTimestretchBufferProvider) {
+        mTimestretchBufferProvider->setBufferProvider(bufferProvider);
+        bufferProvider = mTimestretchBufferProvider;
+    }
 }
 
 void AudioMixer::deleteTrackName(int name)
@@ -751,7 +441,9 @@
     mState.tracks[name].unprepareForDownmix();
     // delete the reformatter
     mState.tracks[name].unprepareForReformat();
-
+    // delete the timestretch provider
+    delete track.mTimestretchBufferProvider;
+    track.mTimestretchBufferProvider = NULL;
     mTrackNames &= ~(1<<name);
 }
 
@@ -973,6 +665,26 @@
             }
         }
         break;
+    case TIMESTRETCH:
+        switch (param) {
+        case PLAYBACK_RATE: {
+            const float speed = reinterpret_cast<float*>(value)[0];
+            const float pitch = reinterpret_cast<float*>(value)[1];
+            ALOG_ASSERT(AUDIO_TIMESTRETCH_SPEED_MIN <= speed
+                    && speed <= AUDIO_TIMESTRETCH_SPEED_MAX,
+                    "bad speed %f", speed);
+            ALOG_ASSERT(AUDIO_TIMESTRETCH_PITCH_MIN <= pitch
+                    && pitch <= AUDIO_TIMESTRETCH_PITCH_MAX,
+                    "bad pitch %f", pitch);
+            if (track.setPlaybackRate(speed, pitch)) {
+                ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, %f %f)", speed, pitch);
+                // invalidateState(1 << name);
+            }
+            } break;
+        default:
+            LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
+        }
+        break;
 
     default:
         LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
@@ -1018,6 +730,28 @@
     return false;
 }
 
+bool AudioMixer::track_t::setPlaybackRate(float speed, float pitch)
+{
+    if (speed == mSpeed && pitch == mPitch) {
+        return false;
+    }
+    mSpeed = speed;
+    mPitch = pitch;
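+    // Create the timestretch provider lazily on the first rate change; afterwards
+    // only its playback-rate parameters are updated.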
+    if (mTimestretchBufferProvider == NULL) {
+        // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
+        // but if none exists, it is the channel count (1 for mono).
+        const int timestretchChannelCount = downmixerBufferProvider != NULL
+                ? mMixerChannelCount : channelCount;
+        mTimestretchBufferProvider = new TimestretchBufferProvider(timestretchChannelCount,
+                mMixerInFormat, sampleRate, speed, pitch);
+        reconfigureBufferProviders();
+    } else {
+        reinterpret_cast<TimestretchBufferProvider*>(mTimestretchBufferProvider)
+                ->setPlaybackRate(speed, pitch);
+    }
+    return true;
+}
+
 /* Checks to see if the volume ramp has completed and clears the increment
  * variables appropriately.
  *
@@ -1096,6 +830,8 @@
         mState.tracks[name].downmixerBufferProvider->reset();
     } else if (mState.tracks[name].mPostDownmixReformatBufferProvider != NULL) {
         mState.tracks[name].mPostDownmixReformatBufferProvider->reset();
+    } else if (mState.tracks[name].mTimestretchBufferProvider != NULL) {
+        mState.tracks[name].mTimestretchBufferProvider->reset();
     }
 
     mState.tracks[name].mInputBufferProvider = bufferProvider;
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 381036b..e27a0d1 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -29,6 +29,7 @@
 #include <utils/threads.h>
 
 #include "AudioResampler.h"
+#include "BufferProviders.h"
 
 // FIXME This is actually unity gain, which might not be max in future, expressed in U.12
 #define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT
@@ -72,6 +73,7 @@
         RESAMPLE        = 0x3001,
         RAMP_VOLUME     = 0x3002, // ramp to new volume
         VOLUME          = 0x3003, // don't ramp
+        TIMESTRETCH     = 0x3004,
 
         // set Parameter names
         // for target TRACK
@@ -99,6 +101,9 @@
         VOLUME0         = 0x4200,
         VOLUME1         = 0x4201,
         AUXLEVEL        = 0x4210,
+        // for target TIMESTRETCH
+        PLAYBACK_RATE   = 0x4300, // Configure timestretch on this track name;
+                                  // parameter 'value' is a pointer to the new playback rate.
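+                                  // The rate is passed as a float[2]: {speed, pitch}.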
     };
 
 
@@ -159,7 +164,6 @@
 
     struct state_t;
     struct track_t;
-    class CopyBufferProvider;
 
     typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp,
                            int32_t* aux);
@@ -214,6 +218,9 @@
 
         /* Buffer providers are constructed to translate the track input data as needed.
          *
+         * TODO: perhaps make a single PlaybackConverterProvider class to move
+         * all pre-mixer track buffer conversions outside the AudioMixer class.
+         *
          * 1) mInputBufferProvider: The AudioTrack buffer provider.
          * 2) mReformatBufferProvider: If not NULL, performs the audio reformat to
          *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
@@ -223,13 +230,14 @@
          *    the number of channels required by the mixer sink.
          * 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
          *    the downmixer requirements to the mixer engine input requirements.
+         * 5) mTimestretchBufferProvider: Adds timestretching for playback rate
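+         *    adjustment; it is created lazily when a non-default rate is first set.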
          */
         AudioBufferProvider*     mInputBufferProvider;    // externally provided buffer provider.
-        CopyBufferProvider*      mReformatBufferProvider; // provider wrapper for reformatting.
-        CopyBufferProvider*      downmixerBufferProvider; // wrapper for channel conversion.
-        CopyBufferProvider*      mPostDownmixReformatBufferProvider;
+        PassthruBufferProvider*  mReformatBufferProvider; // provider wrapper for reformatting.
+        PassthruBufferProvider*  downmixerBufferProvider; // wrapper for channel conversion.
+        PassthruBufferProvider*  mPostDownmixReformatBufferProvider;
+        PassthruBufferProvider*  mTimestretchBufferProvider;
 
-        // 16-byte boundary
         int32_t     sessionId;
 
         audio_format_t mMixerFormat;     // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
@@ -251,6 +259,9 @@
         audio_channel_mask_t mMixerChannelMask;
         uint32_t             mMixerChannelCount;
 
+        float          mSpeed;
+        float          mPitch;
+
         bool        needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
         bool        setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
         bool        doesResample() const { return resampler != NULL; }
@@ -263,6 +274,7 @@
         void        unprepareForDownmix();
         status_t    prepareForReformat();
         void        unprepareForReformat();
+        bool        setPlaybackRate(float speed, float pitch);
         void        reconfigureBufferProviders();
     };
 
@@ -282,112 +294,6 @@
         track_t         tracks[MAX_NUM_TRACKS] __attribute__((aligned(32)));
     };
 
-    // Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider,
-    // and ReformatBufferProvider.
-    // It handles a private buffer for use in converting format or channel masks from the
-    // input data to a form acceptable by the mixer.
-    // TODO: Make a ResamplerBufferProvider when integers are entirely removed from the
-    // processing pipeline.
-    class CopyBufferProvider : public AudioBufferProvider {
-    public:
-        // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes).
-        // If bufferFrameCount is 0, no private buffer is created and in-place modification of
-        // the upstream buffer provider's buffers is performed by copyFrames().
-        CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize,
-                size_t bufferFrameCount);
-        virtual ~CopyBufferProvider();
-
-        // Overrides AudioBufferProvider methods
-        virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
-        virtual void releaseBuffer(Buffer* buffer);
-
-        // Other public methods
-
-        // call this to release the buffer to the upstream provider.
-        // treat it as an audio discontinuity for future samples.
-        virtual void reset();
-
-        // this function should be supplied by the derived class.  It converts
-        // #frames in the *src pointer to the *dst pointer.  It is public because
-        // some providers will allow this to work on arbitrary buffers outside
-        // of the internal buffers.
-        virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
-
-        // set the upstream buffer provider. Consider calling "reset" before this function.
-        void setBufferProvider(AudioBufferProvider *p) {
-            mTrackBufferProvider = p;
-        }
-
-    protected:
-        AudioBufferProvider* mTrackBufferProvider;
-        const size_t         mInputFrameSize;
-        const size_t         mOutputFrameSize;
-    private:
-        AudioBufferProvider::Buffer mBuffer;
-        const size_t         mLocalBufferFrameCount;
-        void*                mLocalBufferData;
-        size_t               mConsumed;
-    };
-
-    // DownmixerBufferProvider wraps a track AudioBufferProvider to provide
-    // position dependent downmixing by an Audio Effect.
-    class DownmixerBufferProvider : public CopyBufferProvider {
-    public:
-        DownmixerBufferProvider(audio_channel_mask_t inputChannelMask,
-                audio_channel_mask_t outputChannelMask, audio_format_t format,
-                uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount);
-        virtual ~DownmixerBufferProvider();
-        virtual void copyFrames(void *dst, const void *src, size_t frames);
-        bool isValid() const { return mDownmixHandle != NULL; }
-
-        static status_t init();
-        static bool isMultichannelCapable() { return sIsMultichannelCapable; }
-
-    protected:
-        effect_handle_t    mDownmixHandle;
-        effect_config_t    mDownmixConfig;
-
-        // effect descriptor for the downmixer used by the mixer
-        static effect_descriptor_t sDwnmFxDesc;
-        // indicates whether a downmix effect has been found and is usable by this mixer
-        static bool                sIsMultichannelCapable;
-        // FIXME: should we allow effects outside of the framework?
-        // We need to here. A special ioId that must be <= -2 so it does not map to a session.
-        static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
-    };
-
-    // RemixBufferProvider wraps a track AudioBufferProvider to perform an
-    // upmix or downmix to the proper channel count and mask.
-    class RemixBufferProvider : public CopyBufferProvider {
-    public:
-        RemixBufferProvider(audio_channel_mask_t inputChannelMask,
-                audio_channel_mask_t outputChannelMask, audio_format_t format,
-                size_t bufferFrameCount);
-        virtual void copyFrames(void *dst, const void *src, size_t frames);
-
-    protected:
-        const audio_format_t mFormat;
-        const size_t         mSampleSize;
-        const size_t         mInputChannels;
-        const size_t         mOutputChannels;
-        int8_t               mIdxAry[sizeof(uint32_t)*8]; // 32 bits => channel indices
-    };
-
-    // ReformatBufferProvider wraps a track AudioBufferProvider to convert the input data
-    // to an acceptable mixer input format type.
-    class ReformatBufferProvider : public CopyBufferProvider {
-    public:
-        ReformatBufferProvider(int32_t channels,
-                audio_format_t inputFormat, audio_format_t outputFormat,
-                size_t bufferFrameCount);
-        virtual void copyFrames(void *dst, const void *src, size_t frames);
-
-    protected:
-        const int32_t        mChannels;
-        const audio_format_t mInputFormat;
-        const audio_format_t mOutputFormat;
-    };
-
     // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc.
     uint32_t        mTrackNames;
 
diff --git a/services/audioflinger/BufferProviders.cpp b/services/audioflinger/BufferProviders.cpp
new file mode 100644
index 0000000..e058e6c
--- /dev/null
+++ b/services/audioflinger/BufferProviders.cpp
@@ -0,0 +1,524 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BufferProvider"
+//#define LOG_NDEBUG 0
+
+#include <audio_effects/effect_downmix.h>
+#include <audio_utils/primitives.h>
+#include <audio_utils/format.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/EffectsFactoryApi.h>
+
+#include <utils/Log.h>
+
+#include "Configuration.h"
+#include "BufferProviders.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+template <typename T>
+static inline T min(const T& a, const T& b)
+{
+    return a < b ? a : b;
+}
+
+CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
+        size_t outputFrameSize, size_t bufferFrameCount) :
+        mInputFrameSize(inputFrameSize),
+        mOutputFrameSize(outputFrameSize),
+        mLocalBufferFrameCount(bufferFrameCount),
+        mLocalBufferData(NULL),
+        mConsumed(0)
+{
+    ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
+            inputFrameSize, outputFrameSize, bufferFrameCount);
+    LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
+            "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)",
+            inputFrameSize, outputFrameSize);
+    if (mLocalBufferFrameCount) {
+        (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
+    }
+    mBuffer.frameCount = 0;
+}
+
+CopyBufferProvider::~CopyBufferProvider()
+{
+    ALOGV("~CopyBufferProvider(%p)", this);
+    if (mBuffer.frameCount != 0) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+    }
+    free(mLocalBufferData);
+}
+
+status_t CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
+        int64_t pts)
+{
+    //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
+    //        this, pBuffer, pBuffer->frameCount, pts);
+    if (mLocalBufferFrameCount == 0) {
+        status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
+        if (res == OK) {
+            copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
+        }
+        return res;
+    }
+    if (mBuffer.frameCount == 0) {
+        mBuffer.frameCount = pBuffer->frameCount;
+        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
+        // At one time an upstream buffer provider had
+        // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014.
+        //
+        // By API spec, if res != OK, then mBuffer.frameCount == 0.
+        // but there may be improper implementations.
+        ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
+        if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
+            pBuffer->raw = NULL;
+            pBuffer->frameCount = 0;
+            return res;
+        }
+        mConsumed = 0;
+    }
+    ALOG_ASSERT(mConsumed < mBuffer.frameCount);
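+    // Convert only as many frames as fit in both the local buffer and the caller's request.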
+    size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
+    count = min(count, pBuffer->frameCount);
+    pBuffer->raw = mLocalBufferData;
+    pBuffer->frameCount = count;
+    copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
+            pBuffer->frameCount);
+    return OK;
+}
+
+void CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+    //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))",
+    //        this, pBuffer, pBuffer->frameCount);
+    if (mLocalBufferFrameCount == 0) {
+        mTrackBufferProvider->releaseBuffer(pBuffer);
+        return;
+    }
+    // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
+    mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content
+    if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+        ALOG_ASSERT(mBuffer.frameCount == 0);
+    }
+    pBuffer->raw = NULL;
+    pBuffer->frameCount = 0;
+}
+
+void CopyBufferProvider::reset()
+{
+    if (mBuffer.frameCount != 0) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+    }
+    mConsumed = 0;
+}
+
+DownmixerBufferProvider::DownmixerBufferProvider(
+        audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
+        CopyBufferProvider(
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
+            bufferFrameCount)  // set bufferFrameCount to 0 to do in-place
+{
+    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
+            this, inputChannelMask, outputChannelMask, format,
+            sampleRate, sessionId);
+    if (!sIsMultichannelCapable
+            || EffectCreate(&sDwnmFxDesc.uuid,
+                    sessionId,
+                    SESSION_ID_INVALID_AND_IGNORED,
+                    &mDownmixHandle) != 0) {
+         ALOGE("DownmixerBufferProvider() error creating downmixer effect");
+         mDownmixHandle = NULL;
+         return;
+     }
+     // channel input configuration will be overridden per-track
+     mDownmixConfig.inputCfg.channels = inputChannelMask;   // FIXME: Should be bits
+     mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
+     mDownmixConfig.inputCfg.format = format;
+     mDownmixConfig.outputCfg.format = format;
+     mDownmixConfig.inputCfg.samplingRate = sampleRate;
+     mDownmixConfig.outputCfg.samplingRate = sampleRate;
+     mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+     mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
+     // input and output buffer provider, and frame count will not be used as the downmix effect
+     // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
+     mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
+             EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
+     mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
+
+     int cmdStatus;
+     uint32_t replySize = sizeof(int);
+
+     // Configure downmixer
+     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
+             &mDownmixConfig /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Enable downmixer
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Set downmix type
+     // parameter size rounded for padding on 32bit boundary
+     const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
+     const int downmixParamSize =
+             sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
+     effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
+     param->psize = sizeof(downmix_params_t);
+     const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
+     memcpy(param->data, &downmixParam, param->psize);
+     const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
+     param->vsize = sizeof(downmix_type_t);
+     memcpy(param->data + psizePadded, &downmixType, param->vsize);
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
+             param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
+     free(param);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+     ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
+}
+
+DownmixerBufferProvider::~DownmixerBufferProvider()
+{
+    ALOGV("~DownmixerBufferProvider (%p)", this);
+    EffectRelease(mDownmixHandle);
+    mDownmixHandle = NULL;
+}
+
+void DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    mDownmixConfig.inputCfg.buffer.frameCount = frames;
+    mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src);
+    mDownmixConfig.outputCfg.buffer.frameCount = frames;
+    mDownmixConfig.outputCfg.buffer.raw = dst;
+    // may be in-place if src == dst.
+    status_t res = (*mDownmixHandle)->process(mDownmixHandle,
+            &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
+    ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
+}
+
+/* call once in a pthread_once handler. */
+/*static*/ status_t DownmixerBufferProvider::init()
+{
+    // find multichannel downmix effect if we have to play multichannel content
+    uint32_t numEffects = 0;
+    int ret = EffectQueryNumberEffects(&numEffects);
+    if (ret != 0) {
+        ALOGE("DownmixerBufferProvider::init() error %d querying number of effects", ret);
+        return NO_INIT;
+    }
+    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
+
+    for (uint32_t i = 0 ; i < numEffects ; i++) {
+        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
+            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
+            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+                ALOGI("found effect \"%s\" from %s",
+                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
+                sIsMultichannelCapable = true;
+                break;
+            }
+        }
+    }
+    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
+    return sIsMultichannelCapable ? OK : NO_INIT;
+}
+
+/*static*/ bool DownmixerBufferProvider::sIsMultichannelCapable = false;
+/*static*/ effect_descriptor_t DownmixerBufferProvider::sDwnmFxDesc;
+
+RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(inputChannelMask),
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(outputChannelMask),
+                bufferFrameCount),
+        mFormat(format),
+        mSampleSize(audio_bytes_per_sample(format)),
+        mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
+        mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
+{
+    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
+            this, format, inputChannelMask, outputChannelMask,
+            mInputChannels, mOutputChannels);
+
+    const audio_channel_representation_t inputRepresentation =
+            audio_channel_mask_get_representation(inputChannelMask);
+    const audio_channel_representation_t outputRepresentation =
+            audio_channel_mask_get_representation(outputChannelMask);
+    const uint32_t inputBits = audio_channel_mask_get_bits(inputChannelMask);
+    const uint32_t outputBits = audio_channel_mask_get_bits(outputChannelMask);
+
+    switch (inputRepresentation) {
+    case AUDIO_CHANNEL_REPRESENTATION_POSITION:
+        switch (outputRepresentation) {
+        case AUDIO_CHANNEL_REPRESENTATION_POSITION:
+            memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry),
+                    outputBits, inputBits);
+            return;
+        case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+            // TODO: output channel index mask not currently allowed
+            // fall through
+        default:
+            break;
+        }
+        break;
+    case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+        switch (outputRepresentation) {
+        case AUDIO_CHANNEL_REPRESENTATION_POSITION:
+            memcpy_by_index_array_initialization_src_index(mIdxAry, ARRAY_SIZE(mIdxAry),
+                    outputBits, inputBits);
+            return;
+        case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+            // TODO: output channel index mask not currently allowed
+            // fall through
+        default:
+            break;
+        }
+        break;
+    default:
+        break;
+    }
+    LOG_ALWAYS_FATAL("invalid channel mask conversion from %#x to %#x",
+            inputChannelMask, outputChannelMask);
+}
+
+void RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_index_array(dst, mOutputChannels,
+            src, mInputChannels, mIdxAry, mSampleSize, frames);
+}
+
+ReformatBufferProvider::ReformatBufferProvider(int32_t channelCount,
+        audio_format_t inputFormat, audio_format_t outputFormat,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+                channelCount * audio_bytes_per_sample(inputFormat),
+                channelCount * audio_bytes_per_sample(outputFormat),
+                bufferFrameCount),
+        mChannelCount(channelCount),
+        mInputFormat(inputFormat),
+        mOutputFormat(outputFormat)
+{
+    ALOGV("ReformatBufferProvider(%p)(%u, %#x, %#x)",
+            this, channelCount, inputFormat, outputFormat);
+}
+
+void ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannelCount);
+}
+
+TimestretchBufferProvider::TimestretchBufferProvider(int32_t channelCount,
+        audio_format_t format, uint32_t sampleRate, float speed, float pitch) :
+        mChannelCount(channelCount),
+        mFormat(format),
+        mSampleRate(sampleRate),
+        mFrameSize(channelCount * audio_bytes_per_sample(format)),
+        mSpeed(speed),
+        mPitch(pitch),
+        mLocalBufferFrameCount(0),
+        mLocalBufferData(NULL),
+        mRemaining(0)
+{
+    ALOGV("TimestretchBufferProvider(%p)(%u, %#x, %u %f %f)",
+            this, channelCount, format, sampleRate, speed, pitch);
+    mBuffer.frameCount = 0;
+}
+
+TimestretchBufferProvider::~TimestretchBufferProvider()
+{
+    ALOGV("~TimestretchBufferProvider(%p)", this);
+    if (mBuffer.frameCount != 0) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+    }
+    free(mLocalBufferData);
+}
+
+status_t TimestretchBufferProvider::getNextBuffer(
+        AudioBufferProvider::Buffer *pBuffer, int64_t pts)
+{
+    ALOGV("TimestretchBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
+            this, pBuffer, pBuffer->frameCount, pts);
+
+    // BYPASS
+    //return mTrackBufferProvider->getNextBuffer(pBuffer, pts);
+
+    // check if previously processed data is sufficient.
+    if (pBuffer->frameCount <= mRemaining) {
+        ALOGV("previous sufficient");
+        pBuffer->raw = mLocalBufferData;
+        return OK;
+    }
+
+    // do we need to resize our buffer?
+    if (pBuffer->frameCount > mLocalBufferFrameCount) {
+        void *newmem;
+        if (posix_memalign(&newmem, 32, pBuffer->frameCount * mFrameSize) == OK) {
+            if (mRemaining != 0) {
+                memcpy(newmem, mLocalBufferData, mRemaining * mFrameSize);
+            }
+            free(mLocalBufferData);
+            mLocalBufferData = newmem;
+            mLocalBufferFrameCount = pBuffer->frameCount;
+        }
+    }
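+    // If the allocation above failed, the old (smaller) buffer is kept and the
+    // destination frame count is clamped to its capacity below.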
+
+    // need to fetch more data
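+    // Roughly mSpeed source frames are consumed per output frame; the extra +1
+    // covers truncation of the fractional part.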
+    const size_t outputDesired = pBuffer->frameCount - mRemaining;
+    mBuffer.frameCount = mSpeed == AUDIO_TIMESTRETCH_SPEED_NORMAL
+            ? outputDesired : outputDesired * mSpeed + 1;
+
+    status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
+
+    ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
+    if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
+        ALOGD("buffer error");
+        if (mRemaining == 0) {
+            pBuffer->raw = NULL;
+            pBuffer->frameCount = 0;
+            return res;
+        } else { // return partial count
+            pBuffer->raw = mLocalBufferData;
+            pBuffer->frameCount = mRemaining;
+            return OK;
+        }
+    }
+
+    // time-stretch the data
+    size_t dstAvailable = min(mLocalBufferFrameCount - mRemaining, outputDesired);
+    size_t srcAvailable = mBuffer.frameCount;
+    processFrames((uint8_t*)mLocalBufferData + mRemaining * mFrameSize, &dstAvailable,
+            mBuffer.raw, &srcAvailable);
+
+    // release all data consumed
+    mBuffer.frameCount = srcAvailable;
+    mTrackBufferProvider->releaseBuffer(&mBuffer);
+
+    // update buffer vars with the actual data processed and return with buffer
+    mRemaining += dstAvailable;
+
+    pBuffer->raw = mLocalBufferData;
+    pBuffer->frameCount = mRemaining;
+
+    return OK;
+}
+
+void TimestretchBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+    ALOGV("TimestretchBufferProvider(%p)::releaseBuffer(%p (%zu))",
+       this, pBuffer, pBuffer->frameCount);
+
+    // BYPASS
+    //return mTrackBufferProvider->releaseBuffer(pBuffer);
+
+    // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
+    if (pBuffer->frameCount < mRemaining) {
+        // The source and destination regions may overlap when fewer than half of the
+        // buffered frames are released, so memmove (not memcpy) is required.
+        memmove(mLocalBufferData,
+                (uint8_t*)mLocalBufferData + pBuffer->frameCount * mFrameSize,
+                (mRemaining - pBuffer->frameCount) * mFrameSize);
+        mRemaining -= pBuffer->frameCount;
+    } else if (pBuffer->frameCount == mRemaining) {
+        mRemaining = 0;
+    } else {
+        LOG_ALWAYS_FATAL("Releasing more frames(%zu) than available(%zu)",
+                pBuffer->frameCount, mRemaining);
+    }
+
+    pBuffer->raw = NULL;
+    pBuffer->frameCount = 0;
+}
+
+void TimestretchBufferProvider::reset()
+{
+    mRemaining = 0;
+}
+
+status_t TimestretchBufferProvider::setPlaybackRate(float speed, float pitch)
+{
+    mSpeed = speed;
+    mPitch = pitch;
+    return OK;
+}
+
+void TimestretchBufferProvider::processFrames(void *dstBuffer, size_t *dstFrames,
+        const void *srcBuffer, size_t *srcFrames)
+{
+    ALOGV("processFrames(%zu %zu)  remaining(%zu)", *dstFrames, *srcFrames, mRemaining);
+    // Note dstFrames is the required number of frames.
+
+    // Ensure consumption from src is as expected.
+    const size_t targetSrc = *dstFrames * mSpeed;
+    if (*srcFrames < targetSrc) { // limit dst frames to what the source can supply
+        *dstFrames = *srcFrames / mSpeed;
+    } else if (*srcFrames > targetSrc + 1) {
+        *srcFrames = targetSrc + 1;
+    }
+
+    // Do the time stretch by memory copy without any local buffer.
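+    // NOTE: this is currently a pass-through placeholder that copies (or cyclically
+    // repeats) source frames; a real time-stretching algorithm would presumably
+    // replace this block.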
+    if (*dstFrames <= *srcFrames) {
+        size_t copySize = mFrameSize * *dstFrames;
+        memcpy(dstBuffer, srcBuffer, copySize);
+    } else {
+        // cyclically repeat the source.
+        for (size_t count = 0; count < *dstFrames; count += *srcFrames) {
+            size_t remaining = min(*srcFrames, *dstFrames - count);
+            memcpy((uint8_t*)dstBuffer + mFrameSize * count,
+                    srcBuffer, mFrameSize * remaining);
+        }
+    }
+}
+
+// ----------------------------------------------------------------------------
+} // namespace android
diff --git a/services/audioflinger/BufferProviders.h b/services/audioflinger/BufferProviders.h
new file mode 100644
index 0000000..2b6ea47
--- /dev/null
+++ b/services/audioflinger/BufferProviders.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BUFFER_PROVIDERS_H
+#define ANDROID_BUFFER_PROVIDERS_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <hardware/audio_effect.h>
+#include <media/AudioBufferProvider.h>
+#include <system/audio.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class PassthruBufferProvider : public AudioBufferProvider {
+public:
+    PassthruBufferProvider() : mTrackBufferProvider(NULL) { }
+
+    virtual ~PassthruBufferProvider() { }
+
+    // call this to release the buffer to the upstream provider.
+    // treat it as an audio discontinuity for future samples.
+    virtual void reset() { }
+
+    // set the upstream buffer provider. Consider calling "reset" before this function.
+    virtual void setBufferProvider(AudioBufferProvider *p) {
+        mTrackBufferProvider = p;
+    }
+
+protected:
+    AudioBufferProvider *mTrackBufferProvider;
+};
+
+// Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider,
+// and ReformatBufferProvider.
+// It handles a private buffer for use in converting format or channel masks from the
+// input data to a form acceptable by the mixer.
+// TODO: Make a ResamplerBufferProvider when integers are entirely removed from the
+// processing pipeline.
+class CopyBufferProvider : public PassthruBufferProvider {
+public:
+    // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes).
+    // If bufferFrameCount is 0, no private buffer is created and in-place modification of
+    // the upstream buffer provider's buffers is performed by copyFrames().
+    CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize,
+            size_t bufferFrameCount);
+    virtual ~CopyBufferProvider();
+
+    // Overrides AudioBufferProvider methods
+    virtual status_t getNextBuffer(Buffer *buffer, int64_t pts);
+    virtual void releaseBuffer(Buffer *buffer);
+
+    // Overrides PassthruBufferProvider
+    virtual void reset();
+
+    // this function should be supplied by the derived class.  It converts
+    // #frames in the *src pointer to the *dst pointer.  It is public because
+    // some providers will allow this to work on arbitrary buffers outside
+    // of the internal buffers.
+    virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
+
+protected:
+    const size_t         mInputFrameSize;
+    const size_t         mOutputFrameSize;
+private:
+    AudioBufferProvider::Buffer mBuffer;
+    const size_t         mLocalBufferFrameCount;
+    void                *mLocalBufferData;
+    size_t               mConsumed;
+};
+
+// DownmixerBufferProvider derives from CopyBufferProvider to provide
+// position dependent downmixing by an Audio Effect.
+class DownmixerBufferProvider : public CopyBufferProvider {
+public:
+    DownmixerBufferProvider(audio_channel_mask_t inputChannelMask,
+            audio_channel_mask_t outputChannelMask, audio_format_t format,
+            uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount);
+    virtual ~DownmixerBufferProvider();
+    //Overrides
+    virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+    bool isValid() const { return mDownmixHandle != NULL; }
+    static status_t init();
+    static bool isMultichannelCapable() { return sIsMultichannelCapable; }
+
+protected:
+    effect_handle_t    mDownmixHandle;
+    effect_config_t    mDownmixConfig;
+
+    // effect descriptor for the downmixer used by the mixer
+    static effect_descriptor_t sDwnmFxDesc;
+    // indicates whether a downmix effect has been found and is usable by this mixer
+    static bool                sIsMultichannelCapable;
+    // FIXME: should we allow effects outside of the framework?
+    // We need to here. A special ioId that must be <= -2 so it does not map to a session.
+    static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
+};
+
+// RemixBufferProvider derives from CopyBufferProvider to perform an
+// upmix or downmix to the proper channel count and mask.
+class RemixBufferProvider : public CopyBufferProvider {
+public:
+    RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+            audio_channel_mask_t outputChannelMask, audio_format_t format,
+            size_t bufferFrameCount);
+    //Overrides
+    virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+protected:
+    const audio_format_t mFormat;
+    const size_t         mSampleSize;
+    const size_t         mInputChannels;
+    const size_t         mOutputChannels;
+    int8_t               mIdxAry[sizeof(uint32_t) * 8]; // 32 bits => channel indices
+};
+
+// ReformatBufferProvider derives from CopyBufferProvider to convert the input data
+// to an acceptable mixer input format type.
+class ReformatBufferProvider : public CopyBufferProvider {
+public:
+    ReformatBufferProvider(int32_t channelCount,
+            audio_format_t inputFormat, audio_format_t outputFormat,
+            size_t bufferFrameCount);
+    virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+protected:
+    const uint32_t       mChannelCount;
+    const audio_format_t mInputFormat;
+    const audio_format_t mOutputFormat;
+};
+
+// TimestretchBufferProvider derives from PassthruBufferProvider for time stretching
+class TimestretchBufferProvider : public PassthruBufferProvider {
+public:
+    TimestretchBufferProvider(int32_t channelCount,
+            audio_format_t format, uint32_t sampleRate, float speed, float pitch);
+    virtual ~TimestretchBufferProvider();
+
+    // Overrides AudioBufferProvider methods
+    virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
+    virtual void releaseBuffer(Buffer* buffer);
+
+    // Overrides PassthruBufferProvider
+    virtual void reset();
+
+    virtual status_t setPlaybackRate(float speed, float pitch);
+
+    // processes frames
+    // dstBuffer is where to place the data
+    // dstFrames [in/out] is the desired frames (return with actual placed in buffer)
+    // srcBuffer is the source data
+    // srcFrames [in/out] is the available source frames (return with consumed)
+    virtual void processFrames(void *dstBuffer, size_t *dstFrames,
+            const void *srcBuffer, size_t *srcFrames);
+
+protected:
+    const uint32_t       mChannelCount;
+    const audio_format_t mFormat;
+    const uint32_t       mSampleRate; // const for now (TODO change this)
+    const size_t         mFrameSize;
+    float                mSpeed;
+    float                mPitch;
+
+private:
+    AudioBufferProvider::Buffer mBuffer;
+    size_t               mLocalBufferFrameCount;
+    void                *mLocalBufferData;
+    size_t               mRemaining;
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_BUFFER_PROVIDERS_H
diff --git a/services/audioflinger/SpdifStreamOut.cpp b/services/audioflinger/SpdifStreamOut.cpp
index d23588e..45b541a 100644
--- a/services/audioflinger/SpdifStreamOut.cpp
+++ b/services/audioflinger/SpdifStreamOut.cpp
@@ -32,10 +32,12 @@
  * If the AudioFlinger is processing encoded data and the HAL expects
  * PCM then we need to wrap the data in an SPDIF wrapper.
  */
-SpdifStreamOut::SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags)
+SpdifStreamOut::SpdifStreamOut(AudioHwDevice *dev,
+            audio_output_flags_t flags,
+            audio_format_t format)
         : AudioStreamOut(dev,flags)
         , mRateMultiplier(1)
-        , mSpdifEncoder(this)
+        , mSpdifEncoder(this, format)
         , mRenderPositionHal(0)
         , mPreviousHalPosition32(0)
 {
@@ -49,15 +51,15 @@
 {
     struct audio_config customConfig = *config;
 
-    customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
-    customConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
-
     // Some data bursts run at a higher sample rate.
+    // TODO Move this into the audio_utils as a static method.
     switch(config->format) {
         case AUDIO_FORMAT_E_AC3:
             mRateMultiplier = 4;
             break;
         case AUDIO_FORMAT_AC3:
+        case AUDIO_FORMAT_DTS:
+        case AUDIO_FORMAT_DTS_HD:
             mRateMultiplier = 1;
             break;
         default:
@@ -67,6 +69,9 @@
     }
     customConfig.sample_rate = config->sample_rate * mRateMultiplier;
 
+    customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    customConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
     // Always print this because otherwise it could be very confusing if the
     // HAL and AudioFlinger are using different formats.
     // Print before open() because HAL may modify customConfig.
diff --git a/services/audioflinger/SpdifStreamOut.h b/services/audioflinger/SpdifStreamOut.h
index cb82ac7..d81c064 100644
--- a/services/audioflinger/SpdifStreamOut.h
+++ b/services/audioflinger/SpdifStreamOut.h
@@ -38,7 +38,8 @@
 class SpdifStreamOut : public AudioStreamOut {
 public:
 
-    SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags);
+    SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags,
+            audio_format_t format);
 
     virtual ~SpdifStreamOut() { }
 
@@ -77,8 +78,9 @@
     class MySPDIFEncoder : public SPDIFEncoder
     {
     public:
-        MySPDIFEncoder(SpdifStreamOut *spdifStreamOut)
-          : mSpdifStreamOut(spdifStreamOut)
+        MySPDIFEncoder(SpdifStreamOut *spdifStreamOut, audio_format_t format)
+          :  SPDIFEncoder(format)
+          , mSpdifStreamOut(spdifStreamOut)
         {
         }
 
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 1a20fae..b30fd20 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1608,13 +1608,19 @@
     // If you change this calculation, also review the start threshold which is related.
     if (!(*flags & IAudioFlinger::TRACK_FAST)
             && audio_is_linear_pcm(format) && sharedBuffer == 0) {
+        // this must match AudioTrack.cpp calculateMinFrameCount().
+        // TODO: Move to a common library
         uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
         uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
         if (minBufCount < 2) {
             minBufCount = 2;
         }
+        // For normal mixing tracks, if speed is > 1.0f (normal), AudioTrack
+        // or the client should compute and pass in a larger buffer request.
         size_t minFrameCount =
-                minBufCount * sourceFramesNeeded(sampleRate, mNormalFrameCount, mSampleRate);
+                minBufCount * sourceFramesNeededWithTimestretch(
+                        sampleRate, mNormalFrameCount,
+                        mSampleRate, AUDIO_TIMESTRETCH_SPEED_NORMAL /*speed*/);
         if (frameCount < minFrameCount) { // including frameCount == 0
             frameCount = minFrameCount;
         }
@@ -3592,21 +3598,17 @@
         // hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed
         // during last round
         size_t desiredFrames;
-        uint32_t sr = track->sampleRate();
-        if (sr == mSampleRate) {
-            desiredFrames = mNormalFrameCount;
-        } else {
-            desiredFrames = sourceFramesNeeded(sr, mNormalFrameCount, mSampleRate);
-            // add frames already consumed but not yet released by the resampler
-            // because mAudioTrackServerProxy->framesReady() will include these frames
-            desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
-#if 0
-            // the minimum track buffer size is normally twice the number of frames necessary
-            // to fill one buffer and the resampler should not leave more than one buffer worth
-            // of unreleased frames after each pass, but just in case...
-            ALOG_ASSERT(desiredFrames <= cblk->frameCount_);
-#endif
-        }
+        const uint32_t sampleRate = track->mAudioTrackServerProxy->getSampleRate();
+        float speed, pitch;
+        track->mAudioTrackServerProxy->getPlaybackRate(&speed, &pitch);
+
+        desiredFrames = sourceFramesNeededWithTimestretch(
+                sampleRate, mNormalFrameCount, mSampleRate, speed);
+        // TODO: ONLY USED FOR LEGACY RESAMPLERS, remove when they are removed.
+        // add frames already consumed but not yet released by the resampler
+        // because mAudioTrackServerProxy->framesReady() will include these frames
+        desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
+
         uint32_t minFrames = 1;
         if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
                 (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
@@ -3769,6 +3771,17 @@
                 AudioMixer::RESAMPLE,
                 AudioMixer::SAMPLE_RATE,
                 (void *)(uintptr_t)reqSampleRate);
+
+            // set the playback rate as a float array {speed, pitch}
+            float playbackRate[2];
+            track->mAudioTrackServerProxy->getPlaybackRate(
+                    &playbackRate[0] /*speed*/, &playbackRate[1] /*pitch*/);
+            mAudioMixer->setParameter(
+                name,
+                AudioMixer::TIMESTRETCH,
+                AudioMixer::PLAYBACK_RATE,
+                playbackRate);
+
             /*
              * Select the appropriate output buffer for the track.
              *
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 1566b1f..da2d634 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -903,9 +903,14 @@
             mPreviousTimestampValid = false;
             return INVALID_OPERATION;
         }
+        // FIXME Not accurate under dynamic changes of sample rate and speed.
+        // Do not use track's mSampleRate as it is not current for mixer tracks.
+        uint32_t sampleRate = mAudioTrackServerProxy->getSampleRate();
+        float speed, pitch;
+        mAudioTrackServerProxy->getPlaybackRate(&speed, &pitch);
         uint32_t unpresentedFrames =
-                ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) /
-                playbackThread->mSampleRate;
+                ((double) playbackThread->mLatchQ.mUnpresentedFrames * sampleRate * speed)
+                / playbackThread->mSampleRate;
         // FIXME Since we're using a raw pointer as the key, it is theoretically possible
         //       for a brand new track to share the same address as a recently destroyed
         //       track, and thus for us to get the frames released of the wrong track.
diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk
index 8604ef5..76997be 100644
--- a/services/audioflinger/tests/Android.mk
+++ b/services/audioflinger/tests/Android.mk
@@ -39,6 +39,7 @@
 LOCAL_SRC_FILES:= \
 	test-mixer.cpp \
 	../AudioMixer.cpp.arm \
+	../BufferProviders.cpp
 
 LOCAL_C_INCLUDES := \
 	$(call include-path-for, audio-effects) \
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index dea1b8a..1c2c27e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -62,8 +62,12 @@
     // searches for an exact match
     status_t checkExactChannelMask(audio_channel_mask_t channelMask) const;
     // searches for a compatible match, currently implemented for input channel masks only
-    status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask) const;
-    status_t checkFormat(audio_format_t format) const;
+    status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask,
+            audio_channel_mask_t *updatedChannelMask) const;
+
+    status_t checkExactFormat(audio_format_t format) const;
+    // searches for a compatible match, currently implemented for input formats only
+    status_t checkCompatibleFormat(audio_format_t format, audio_format_t *updatedFormat) const;
     status_t checkGain(const struct audio_gain_config *gainConfig, int index) const;
 
     uint32_t pickSamplingRate() const;
@@ -71,6 +75,11 @@
     audio_format_t pickFormat() const;
 
     static const audio_format_t sPcmFormatCompareTable[];
+    static int compareFormatsGoodToBad(
+            const audio_format_t *format1, const audio_format_t *format2) {
+        // compareFormats sorts from bad to good, we reverse it here
+        return compareFormats(*format2, *format1);
+    }
     static int compareFormats(audio_format_t format1, audio_format_t format2);
 
     audio_module_handle_t getModuleHandle() const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index 022257e..ab6fcc1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -45,7 +45,9 @@
                              uint32_t samplingRate,
                              uint32_t *updatedSamplingRate,
                              audio_format_t format,
+                             audio_format_t *updatedFormat,
                              audio_channel_mask_t channelMask,
+                             audio_channel_mask_t *updatedChannelMask,
                              uint32_t flags) const;
 
     void dump(int fd);
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index e8191dd..f3978ec 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -16,7 +16,7 @@
 
 #define LOG_TAG "APM::AudioPort"
 //#define LOG_NDEBUG 0
-
+#include <media/AudioResamplerPublic.h>
 #include "AudioPort.h"
 #include "HwModule.h"
 #include "AudioGain.h"
@@ -216,6 +216,7 @@
         }
         str = strtok(NULL, "|");
     }
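+    // Keep the format list ordered best-to-worst so that inexact matching in
+    // checkCompatibleFormat() can simply take the first acceptable PCM entry.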
+    mFormats.sort(compareFormatsGoodToBad);
 }
 
 void AudioPort::loadInChannels(char *name)
@@ -358,6 +359,9 @@
         uint32_t *updatedSamplingRate) const
 {
     if (mSamplingRates.isEmpty()) {
+        if (updatedSamplingRate != NULL) {
+            *updatedSamplingRate = samplingRate;
+        }
         return NO_ERROR;
     }
 
@@ -387,16 +391,11 @@
             }
         }
     }
-    // This uses hard-coded knowledge about AudioFlinger resampling ratios.
-    // TODO Move these assumptions out.
-    static const uint32_t kMaxDownSampleRatio = 6;  // beyond this aliasing occurs
-    static const uint32_t kMaxUpSampleRatio = 256;  // beyond this sample rate inaccuracies occur
-                                                    // due to approximation by an int32_t of the
-                                                    // phase increments
+
     // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum.
     if (minAbove >= 0) {
         candidate = mSamplingRates[minAbove];
-        if (candidate / kMaxDownSampleRatio <= samplingRate) {
+        if (candidate / AUDIO_RESAMPLER_DOWN_RATIO_MAX <= samplingRate) {
             if (updatedSamplingRate != NULL) {
                 *updatedSamplingRate = candidate;
             }
@@ -406,7 +405,7 @@
     // But if we have to up-sample from a lower sampling rate, that's OK.
     if (maxBelow >= 0) {
         candidate = mSamplingRates[maxBelow];
-        if (candidate * kMaxUpSampleRatio >= samplingRate) {
+        if (candidate * AUDIO_RESAMPLER_UP_RATIO_MAX >= samplingRate) {
             if (updatedSamplingRate != NULL) {
                 *updatedSamplingRate = candidate;
             }
@@ -431,10 +430,13 @@
     return BAD_VALUE;
 }
 
-status_t AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask)
-        const
+status_t AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask,
+        audio_channel_mask_t *updatedChannelMask) const
 {
     if (mChannelMasks.isEmpty()) {
+        if (updatedChannelMask != NULL) {
+            *updatedChannelMask = channelMask;
+        }
         return NO_ERROR;
     }
 
@@ -443,6 +445,9 @@
         // FIXME Does not handle multi-channel automatic conversions yet
         audio_channel_mask_t supported = mChannelMasks[i];
         if (supported == channelMask) {
+            if (updatedChannelMask != NULL) {
+                *updatedChannelMask = channelMask;
+            }
             return NO_ERROR;
         }
         if (isRecordThread) {
@@ -452,6 +457,9 @@
                     && channelMask == AUDIO_CHANNEL_IN_MONO) ||
                 (supported == AUDIO_CHANNEL_IN_MONO && (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK
                     || channelMask == AUDIO_CHANNEL_IN_STEREO))) {
+                if (updatedChannelMask != NULL) {
+                    *updatedChannelMask = supported;
+                }
                 return NO_ERROR;
             }
         }
@@ -459,7 +467,7 @@
     return BAD_VALUE;
 }
 
-status_t AudioPort::checkFormat(audio_format_t format) const
+status_t AudioPort::checkExactFormat(audio_format_t format) const
 {
     if (mFormats.isEmpty()) {
         return NO_ERROR;
@@ -473,6 +481,33 @@
     return BAD_VALUE;
 }
 
+status_t AudioPort::checkCompatibleFormat(audio_format_t format, audio_format_t *updatedFormat)
+        const
+{
+    if (mFormats.isEmpty()) {
+        if (updatedFormat != NULL) {
+            *updatedFormat = format;
+        }
+        return NO_ERROR;
+    }
+
+    const bool checkInexact = // when port is input and format is linear pcm
+            mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK
+            && audio_is_linear_pcm(format);
+
+    for (size_t i = 0; i < mFormats.size(); ++i) {
+        if (mFormats[i] == format ||
+                (checkInexact && audio_is_linear_pcm(mFormats[i]))) {
+            // for inexact checks we take the first linear pcm format since
+            // mFormats is sorted from best PCM format to worst PCM format.
+            if (updatedFormat != NULL) {
+                *updatedFormat = mFormats[i];
+            }
+            return NO_ERROR;
+        }
+    }
+    return BAD_VALUE;
+}
 
 uint32_t AudioPort::pickSamplingRate() const
 {
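
A hypothetical caller-side view of the new inexact format matching (the inputPort variable and the requested format are assumptions, not taken from the patch): for an input mix port whose profile lists only linear PCM formats, a capture request for AUDIO_FORMAT_PCM_FLOAT is no longer rejected outright; the best supported PCM format is reported back instead:

audio_format_t negotiated = AUDIO_FORMAT_DEFAULT;
// 'inputPort' stands for any AudioPort describing an input (mix/sink) profile
if (inputPort->checkCompatibleFormat(AUDIO_FORMAT_PCM_FLOAT, &negotiated) == NO_ERROR) {
    // negotiated now holds the first linear PCM entry of mFormats (the "best" one after
    // the good-to-bad sort), e.g. AUDIO_FORMAT_PCM_16_BIT; the capture stream should be
    // opened with that format rather than the original request.
}
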
@@ -756,7 +791,7 @@
         mChannelMask = config->channel_mask;
     }
     if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
-        status = audioport->checkFormat(config->format);
+        status = audioport->checkExactFormat(config->format);
         if (status != NO_ERROR) {
             goto exit;
         }
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index de6539c..7b6d51d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -40,7 +40,9 @@
                                     uint32_t samplingRate,
                                     uint32_t *updatedSamplingRate,
                                     audio_format_t format,
+                                    audio_format_t *updatedFormat,
                                     audio_channel_mask_t channelMask,
+                                    audio_channel_mask_t *updatedChannelMask,
                                     uint32_t flags) const
 {
     const bool isPlaybackThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SOURCE;
@@ -71,7 +73,14 @@
          return false;
     }
 
-    if (!audio_is_valid_format(format) || checkFormat(format) != NO_ERROR) {
+    if (!audio_is_valid_format(format)) {
+        return false;
+    }
+    if (isPlaybackThread && checkExactFormat(format) != NO_ERROR) {
+        return false;
+    }
+    audio_format_t myUpdatedFormat = format;
+    if (isRecordThread && checkCompatibleFormat(format, &myUpdatedFormat) != NO_ERROR) {
         return false;
     }
 
@@ -79,8 +88,9 @@
             checkExactChannelMask(channelMask) != NO_ERROR)) {
         return false;
     }
+    audio_channel_mask_t myUpdatedChannelMask = channelMask;
     if (isRecordThread && (!audio_is_input_channel(channelMask) ||
-            checkCompatibleChannelMask(channelMask) != NO_ERROR)) {
+            checkCompatibleChannelMask(channelMask, &myUpdatedChannelMask) != NO_ERROR)) {
         return false;
     }
 
@@ -99,6 +109,12 @@
     if (updatedSamplingRate != NULL) {
         *updatedSamplingRate = myUpdatedSamplingRate;
     }
+    if (updatedFormat != NULL) {
+        *updatedFormat = myUpdatedFormat;
+    }
+    if (updatedChannelMask != NULL) {
+        *updatedChannelMask = myUpdatedChannelMask;
+    }
     return true;
 }
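
To show how the extended out-parameters are meant to be consumed (sketch only; the device, address and requested values are placeholders), a caller can pass locals for all three updated values and, on success, open the stream with whatever the profile actually supports, mirroring what getInputProfile() does further down in this change:

uint32_t rate = 44100;                                  // requested capture parameters
audio_format_t format = AUDIO_FORMAT_PCM_FLOAT;
audio_channel_mask_t mask = AUDIO_CHANNEL_IN_STEREO;
if (profile->isCompatibleProfile(device, address,
                                 rate, &rate,           // may move within resampler limits
                                 format, &format,       // may fall back to the best supported PCM format
                                 mask, &mask,           // may fall back e.g. to mono on a mono-only profile
                                 AUDIO_INPUT_FLAG_NONE)) {
    // open the input with (rate, format, mask) rather than the original request
}
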
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 35e80f7..ba9f996 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -585,8 +585,10 @@
         }
         for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
             sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
-            bool found = profile->isCompatibleProfile(device, String8(""), samplingRate,
-                    NULL /*updatedSamplingRate*/, format, channelMask,
+            bool found = profile->isCompatibleProfile(device, String8(""),
+                    samplingRate, NULL /*updatedSamplingRate*/,
+                    format, NULL /*updatedFormat*/,
+                    channelMask, NULL /*updatedChannelMask*/,
                     flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD ?
                         AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD : AUDIO_OUTPUT_FLAG_DIRECT);
             if (found && (mAvailableOutputDevices.types() & profile->mSupportedDevices.types())) {
@@ -1303,20 +1305,25 @@
         }
     }
 
-    sp<IOProfile> profile = getInputProfile(device, address,
-                                            samplingRate, format, channelMask,
-                                            flags);
-    if (profile == 0) {
-        //retry without flags
-        audio_input_flags_t log_flags = flags;
-        flags = AUDIO_INPUT_FLAG_NONE;
+    // find a compatible input profile (not necessarily identical in parameters)
+    sp<IOProfile> profile;
+    // samplingRate, format and channelMask may be updated by getInputProfile();
+    // flags may be cleared when retrying without flags below
+    uint32_t profileSamplingRate = samplingRate;
+    audio_format_t profileFormat = format;
+    audio_channel_mask_t profileChannelMask = channelMask;
+    audio_input_flags_t profileFlags = flags;
+    for (;;) {
         profile = getInputProfile(device, address,
-                                  samplingRate, format, channelMask,
-                                  flags);
-        if (profile == 0) {
+                                  profileSamplingRate, profileFormat, profileChannelMask,
+                                  profileFlags);
+        if (profile != 0) {
+            break; // success
+        } else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
+            profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
+        } else { // fail
             ALOGW("getInputForAttr() could not find profile for device 0x%X, samplingRate %u,"
                     "format %#x, channelMask 0x%X, flags %#x",
-                    device, samplingRate, format, channelMask, log_flags);
+                    device, samplingRate, format, channelMask, flags);
             return BAD_VALUE;
         }
     }
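
For clarity, a hypothetical negotiation trace through the loop above (device, rates and formats are made up): a 44100 Hz, PCM float, stereo request with AUDIO_INPUT_FLAG_FAST against a HAL that only advertises a 48000 Hz, 16-bit stereo input without fast capture would proceed as:

//   pass 1: profileFlags == AUDIO_INPUT_FLAG_FAST  -> no compatible profile found
//   pass 2: profileFlags == AUDIO_INPUT_FLAG_NONE  -> profile found;
//           profileSamplingRate -> 48000, profileFormat -> AUDIO_FORMAT_PCM_16_BIT,
//           profileChannelMask  -> AUDIO_CHANNEL_IN_STEREO (unchanged)
// The input is then opened with the profile* values, while the warning on the failure
// path still reports the caller's original request.
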
@@ -1327,9 +1334,9 @@
     }
 
     audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-    config.sample_rate = samplingRate;
-    config.channel_mask = channelMask;
-    config.format = format;
+    config.sample_rate = profileSamplingRate;
+    config.channel_mask = profileChannelMask;
+    config.format = profileFormat;
 
     status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
                                                    input,
@@ -1337,14 +1344,15 @@
                                                    &device,
                                                    address,
                                                    halInputSource,
-                                                   flags);
+                                                   profileFlags);
 
     // only accept input with the exact requested set of parameters
     if (status != NO_ERROR || *input == AUDIO_IO_HANDLE_NONE ||
-        (samplingRate != config.sample_rate) ||
-        (format != config.format) ||
-        (channelMask != config.channel_mask)) {
-        ALOGW("getInputForAttr() failed opening input: samplingRate %d, format %d, channelMask %x",
+        (profileSamplingRate != config.sample_rate) ||
+        (profileFormat != config.format) ||
+        (profileChannelMask != config.channel_mask)) {
+        ALOGW("getInputForAttr() failed opening input: samplingRate %d, format %d,"
+                " channelMask %x",
                 samplingRate, format, channelMask);
         if (*input != AUDIO_IO_HANDLE_NONE) {
             mpClientInterface->closeInput(*input);
@@ -1356,9 +1364,9 @@
     inputDesc->mInputSource = inputSource;
     inputDesc->mRefCount = 0;
     inputDesc->mOpenRefCount = 1;
-    inputDesc->mSamplingRate = samplingRate;
-    inputDesc->mFormat = format;
-    inputDesc->mChannelMask = channelMask;
+    inputDesc->mSamplingRate = profileSamplingRate;
+    inputDesc->mFormat = profileFormat;
+    inputDesc->mChannelMask = profileChannelMask;
     inputDesc->mDevice = device;
     inputDesc->mSessions.add(session);
     inputDesc->mIsSoundTrigger = isSoundTrigger;
@@ -2122,9 +2130,12 @@
                                                            patch->sources[0].sample_rate,
                                                            NULL,  // updatedSamplingRate
                                                            patch->sources[0].format,
+                                                           NULL,  // updatedFormat
                                                            patch->sources[0].channel_mask,
+                                                           NULL,  // updatedChannelMask
                                                            AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) {
-                ALOGV("createAudioPatch() profile not supported for device %08x", devDesc->type());
+                ALOGV("createAudioPatch() profile not supported for device %08x",
+                        devDesc->type());
                 return INVALID_OPERATION;
             }
             devices.add(devDesc);
@@ -2176,7 +2187,9 @@
                                                           patch->sinks[0].sample_rate,
                                                           NULL, /*updatedSampleRate*/
                                                           patch->sinks[0].format,
+                                                          NULL, /*updatedFormat*/
                                                           patch->sinks[0].channel_mask,
+                                                          NULL, /*updatedChannelMask*/
                                                           // FIXME for the parameter type,
                                                           // and the NONE
                                                           (audio_output_flags_t)
@@ -4201,12 +4214,15 @@
 sp<IOProfile> AudioPolicyManager::getInputProfile(audio_devices_t device,
                                                   String8 address,
                                                   uint32_t& samplingRate,
-                                                  audio_format_t format,
-                                                  audio_channel_mask_t channelMask,
+                                                  audio_format_t& format,
+                                                  audio_channel_mask_t& channelMask,
                                                   audio_input_flags_t flags)
 {
     // Choose an input profile based on the requested capture parameters: select the first available
     // profile supporting all requested parameters.
+    //
+    // TODO: perhaps isCompatibleProfile should return a "matching" score so we can return
+    // the best matching profile, not the first one.
 
     for (size_t i = 0; i < mHwModules.size(); i++)
     {
@@ -4219,7 +4235,11 @@
             // profile->log();
             if (profile->isCompatibleProfile(device, address, samplingRate,
                                              &samplingRate /*updatedSamplingRate*/,
-                                             format, channelMask, (audio_output_flags_t) flags)) {
+                                             format,
+                                             &format /*updatedFormat*/,
+                                             channelMask,
+                                             &channelMask /*updatedChannelMask*/,
+                                             (audio_output_flags_t) flags)) {
 
                 return profile;
             }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 11fd5ff..fe6b986 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -470,12 +470,12 @@
         audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
                                        audio_output_flags_t flags,
                                        audio_format_t format);
-        // samplingRate parameter is an in/out and so may be modified
+        // samplingRate, format, channelMask are in/out and so may be modified
         sp<IOProfile> getInputProfile(audio_devices_t device,
                                       String8 address,
                                       uint32_t& samplingRate,
-                                      audio_format_t format,
-                                      audio_channel_mask_t channelMask,
+                                      audio_format_t& format,
+                                      audio_channel_mask_t& channelMask,
                                       audio_input_flags_t flags);
         sp<IOProfile> getProfileForDirectOutput(audio_devices_t device,
                                                        uint32_t samplingRate,