am 073dec19: am 22990fe1: Merge "Back to the old way of making sure that no more buffers are submitted" into klp-dev

* commit '073dec19db486290a02d9952e392df9cda5700ad':
  Back to the old way of making sure that no more buffers are submitted
diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
index 234aef2..f400732 100644
--- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
+++ b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
@@ -316,6 +316,7 @@
 
     if (-1 < fileDesc) {
         if (FwdLockFile_attach(fileDesc) < 0) {
+            close(fileDesc);
             return mimeString;
         }
         const char* pMimeType = FwdLockFile_GetContentType(fileDesc);
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 052064d..fb47448 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -60,7 +60,7 @@
         size_t      frameCount;     // number of sample frames corresponding to size;
                                     // on input it is the number of frames available,
                                     // on output is the number of frames actually drained
-                                    // (currently ignored, but will make the primary field in future)
+                                    // (currently ignored but will make the primary field in future)
 
         size_t      size;           // input/output in bytes == frameCount * frameSize
                                     // FIXME this is redundant with respect to frameCount,
@@ -446,7 +446,8 @@
                                                     // notification callback
     uint32_t                mNotificationFramesAct; // actual number of frames between each
                                                     // notification callback
-    bool                    mRefreshRemaining;  // processAudioBuffer() should refresh next 2
+    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh
+                                                    // mRemainingFrames and mRetryOnPartialBuffer
 
     // These are private to processAudioBuffer(), and are not protected by a lock
     uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 225ef76..b96b8a1 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -155,7 +155,8 @@
     class OutputDescriptor {
     public:
         OutputDescriptor()
-        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0)  {}
+        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0)
+            {}
 
         uint32_t samplingRate;
         audio_format_t format;
diff --git a/include/media/AudioTimestamp.h b/include/media/AudioTimestamp.h
index c29c7e5..99e9c3e 100644
--- a/include/media/AudioTimestamp.h
+++ b/include/media/AudioTimestamp.h
@@ -19,6 +19,8 @@
 
 #include <time.h>
 
+namespace android {
+
 class AudioTimestamp {
 public:
     AudioTimestamp() : mPosition(0) {
@@ -30,4 +32,6 @@
     struct timespec mTime;     // corresponding CLOCK_MONOTONIC when frame is expected to present
 };
 
+}   // namespace
+
 #endif  // ANDROID_AUDIO_TIMESTAMP_H
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index f379ee5..bec77ce 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -666,7 +666,6 @@
     size_t                  mReqFrameCount;         // frame count to request the next time a new
                                                     // IAudioTrack is needed
 
-
     // constant after constructor or set()
     audio_format_t          mFormat;                // as requested by client, not forced to 16-bit
     audio_stream_type_t     mStreamType;
@@ -705,7 +704,8 @@
     uint32_t                mNotificationFramesAct; // actual number of frames between each
                                                     // notification callback,
                                                     // at initial source sample rate
-    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh next 2
+    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh
+                                                    // mRemainingFrames and mRetryOnPartialBuffer
 
     // These are private to processAudioBuffer(), and are not protected by a lock
     uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index e796ab3..2fcfc62 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -247,6 +247,8 @@
             int32_t numChannels, int32_t sampleRate, int32_t bitRate,
             int32_t aacProfile, bool isADTS);
 
+    status_t setupAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate);
+
     status_t selectAudioPortFormat(
             OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat);
 
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 742bc0e..157b1aa 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -80,7 +80,6 @@
             const sp<DataSource> &source, String8 *mimeType,
             float *confidence, sp<AMessage> *meta);
 
-    static void RegisterSniffer(SnifferFunc func);
     static void RegisterDefaultSniffers();
 
     // for DRM
@@ -101,6 +100,9 @@
 private:
     static Mutex gSnifferMutex;
     static List<SnifferFunc> gSniffers;
+    static bool gSniffersRegistered;
+
+    static void RegisterSniffer_l(SnifferFunc func);
 
     DataSource(const DataSource &);
     DataSource &operator=(const DataSource &);
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 85693d4..cf5beda 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -44,6 +44,7 @@
 extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
 extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
 extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
 
 extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
 extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index daaf20f..5121c17 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -248,6 +248,8 @@
             int32_t numChannels, int32_t sampleRate, int32_t bitRate,
             int32_t aacProfile, bool isADTS);
 
+    status_t setAC3Format(int32_t numChannels, int32_t sampleRate);
+
     void setG711Format(int32_t numChannels);
 
     status_t setVideoPortFormatType(
diff --git a/include/media/stagefright/SkipCutBuffer.h b/include/media/stagefright/SkipCutBuffer.h
index 2653b53..098aa69 100644
--- a/include/media/stagefright/SkipCutBuffer.h
+++ b/include/media/stagefright/SkipCutBuffer.h
@@ -47,6 +47,7 @@
  private:
     void write(const char *src, size_t num);
     size_t read(char *dst, size_t num);
+    int32_t mSkip;
     int32_t mFrontPadding;
     int32_t mBackPadding;
     int32_t mWriteHead;
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 7fd9379..85862a8 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -48,7 +48,7 @@
 #define CBLK_STREAM_END_DONE 0x400 // set by server on render completion, cleared by client
 
 //EL_FIXME 20 seconds may not be enough and must be reconciled with new obtainBuffer implementation
-#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 //assuming upto a maximum of 20 seconds of offloaded
+#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 // assuming up to a maximum of 20 seconds of offloaded
 
 struct AudioTrackSharedStreaming {
     // similar to NBAIO MonoPipe
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index b8a89a0..8319dcd 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -250,9 +250,6 @@
     if (format == AUDIO_FORMAT_DEFAULT) {
         format = AUDIO_FORMAT_PCM_16_BIT;
     }
-    if (channelMask == 0) {
-        channelMask = AUDIO_CHANNEL_OUT_STEREO;
-    }
 
     // validate parameters
     if (!audio_is_valid_format(format)) {
@@ -260,6 +257,11 @@
         return BAD_VALUE;
     }
 
+    if (!audio_is_output_channel(channelMask)) {
+        ALOGE("Invalid channel mask %#x", channelMask);
+        return BAD_VALUE;
+    }
+
     // AudioFlinger does not currently support 8-bit data in shared memory
     if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
         ALOGE("8-bit data in shared memory is not supported");
@@ -282,10 +284,6 @@
         flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
     }
 
-    if (!audio_is_output_channel(channelMask)) {
-        ALOGE("Invalid channel mask %#x", channelMask);
-        return BAD_VALUE;
-    }
     mChannelMask = channelMask;
     uint32_t channelCount = popcount(channelMask);
     mChannelCount = channelCount;
@@ -445,8 +443,7 @@
 void AudioTrack::stop()
 {
     AutoMutex lock(mLock);
-    // FIXME pause then stop should not be a nop
-    if (mState != STATE_ACTIVE) {
+    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
         return;
     }
 
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index acfaea0..9df10f0 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -139,7 +139,7 @@
             lStatus = reply.readInt32();
             track = interface_cast<IAudioTrack>(reply.readStrongBinder());
         }
-        if (status) {
+        if (status != NULL) {
             *status = lStatus;
         }
         return track;
@@ -198,7 +198,7 @@
                 }
             }
         }
-        if (status) {
+        if (status != NULL) {
             *status = lStatus;
         }
         return record;
@@ -415,15 +415,25 @@
         audio_io_handle_t output = (audio_io_handle_t) reply.readInt32();
         ALOGV("openOutput() returned output, %d", output);
         devices = (audio_devices_t)reply.readInt32();
-        if (pDevices != NULL) *pDevices = devices;
+        if (pDevices != NULL) {
+            *pDevices = devices;
+        }
         samplingRate = reply.readInt32();
-        if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
+        if (pSamplingRate != NULL) {
+            *pSamplingRate = samplingRate;
+        }
         format = (audio_format_t) reply.readInt32();
-        if (pFormat != NULL) *pFormat = format;
+        if (pFormat != NULL) {
+            *pFormat = format;
+        }
         channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask != NULL) *pChannelMask = channelMask;
+        if (pChannelMask != NULL) {
+            *pChannelMask = channelMask;
+        }
         latency = reply.readInt32();
-        if (pLatencyMs != NULL) *pLatencyMs = latency;
+        if (pLatencyMs != NULL) {
+            *pLatencyMs = latency;
+        }
         return output;
     }
 
@@ -487,13 +497,21 @@
         remote()->transact(OPEN_INPUT, data, &reply);
         audio_io_handle_t input = (audio_io_handle_t) reply.readInt32();
         devices = (audio_devices_t)reply.readInt32();
-        if (pDevices != NULL) *pDevices = devices;
+        if (pDevices != NULL) {
+            *pDevices = devices;
+        }
         samplingRate = reply.readInt32();
-        if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
+        if (pSamplingRate != NULL) {
+            *pSamplingRate = samplingRate;
+        }
         format = (audio_format_t) reply.readInt32();
-        if (pFormat != NULL) *pFormat = format;
+        if (pFormat != NULL) {
+            *pFormat = format;
+        }
         channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask != NULL) *pChannelMask = channelMask;
+        if (pChannelMask != NULL) {
+            *pChannelMask = channelMask;
+        }
         return input;
     }
 
@@ -535,11 +553,11 @@
         status_t status = reply.readInt32();
         if (status == NO_ERROR) {
             uint32_t tmp = reply.readInt32();
-            if (halFrames) {
+            if (halFrames != NULL) {
                 *halFrames = tmp;
             }
             tmp = reply.readInt32();
-            if (dspFrames) {
+            if (dspFrames != NULL) {
                 *dspFrames = tmp;
             }
         }
@@ -657,7 +675,7 @@
 
         if (pDesc == NULL) {
             return effect;
-            if (status) {
+            if (status != NULL) {
                 *status = BAD_VALUE;
             }
         }
@@ -675,7 +693,7 @@
         } else {
             lStatus = reply.readInt32();
             int tmp = reply.readInt32();
-            if (id) {
+            if (id != NULL) {
                 *id = tmp;
             }
             tmp = reply.readInt32();
@@ -685,7 +703,7 @@
             effect = interface_cast<IEffect>(reply.readStrongBinder());
             reply.read(pDesc, sizeof(effect_descriptor_t));
         }
-        if (status) {
+        if (status != NULL) {
             *status = lStatus;
         }
 
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index 22e9fad..b420c95 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -600,16 +600,15 @@
         // wrong audio audio buffer size  (mAudioBufferSize)
         unsigned long toggle = mToggle ^ 1;
         void *userData = (void *)((unsigned long)this | toggle);
-        uint32_t channels = (numChannels == 2) ?
-                AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO;
+        audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(numChannels);
 
         // do not create a new audio track if current track is compatible with sample parameters
 #ifdef USE_SHARED_MEM_BUFFER
         newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
-                channels, sample->getIMemory(), AUDIO_OUTPUT_FLAG_FAST, callback, userData);
+                channelMask, sample->getIMemory(), AUDIO_OUTPUT_FLAG_FAST, callback, userData);
 #else
         newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
-                channels, frameCount, AUDIO_OUTPUT_FLAG_FAST, callback, userData,
+                channelMask, frameCount, AUDIO_OUTPUT_FLAG_FAST, callback, userData,
                 bufferFrames);
 #endif
         oldTrack = mAudioTrack;
diff --git a/media/libmediaplayerservice/HDCP.cpp b/media/libmediaplayerservice/HDCP.cpp
index c2ac1a3..afe3936 100644
--- a/media/libmediaplayerservice/HDCP.cpp
+++ b/media/libmediaplayerservice/HDCP.cpp
@@ -107,11 +107,7 @@
         return NO_INIT;
     }
 
-    // TO-DO:
-    // Only support HDCP_CAPS_ENCRYPT (byte-array to byte-array) for now.
-    // use mHDCPModule->getCaps() when the HDCP libraries get updated.
-    //return mHDCPModule->getCaps();
-    return HDCPModule::HDCP_CAPS_ENCRYPT;
+    return mHDCPModule->getCaps();
 }
 
 status_t HDCP::encrypt(
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 528fdb9..6d2191d 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -35,7 +35,9 @@
 
 #include <media/hardware/HardwareAPI.h>
 
+#include <OMX_AudioExt.h>
 #include <OMX_Component.h>
+#include <OMX_IndexExt.h>
 
 #include "include/avc_utils.h"
 
@@ -965,6 +967,10 @@
             "audio_decoder.flac", "audio_encoder.flac" },
         { MEDIA_MIMETYPE_AUDIO_MSGSM,
             "audio_decoder.gsm", "audio_encoder.gsm" },
+        { MEDIA_MIMETYPE_VIDEO_MPEG2,
+            "video_decoder.mpeg2", "video_encoder.mpeg2" },
+        { MEDIA_MIMETYPE_AUDIO_AC3,
+            "audio_decoder.ac3", "audio_encoder.ac3" },
     };
 
     static const size_t kNumMimeToRole =
@@ -1256,6 +1262,15 @@
         } else {
             err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
         }
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
+        int32_t numChannels;
+        int32_t sampleRate;
+        if (!msg->findInt32("channel-count", &numChannels)
+                || !msg->findInt32("sample-rate", &sampleRate)) {
+            err = INVALID_OPERATION;
+        } else {
+            err = setupAC3Codec(encoder, numChannels, sampleRate);
+        }
     }
 
     if (err != OK) {
@@ -1452,6 +1467,44 @@
             mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
 }
 
+status_t ACodec::setupAC3Codec(
+        bool encoder, int32_t numChannels, int32_t sampleRate) {
+    status_t err = setupRawAudioFormat(
+            encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (encoder) {
+        ALOGW("AC3 encoding is not supported.");
+        return INVALID_OPERATION;
+    }
+
+    OMX_AUDIO_PARAM_ANDROID_AC3TYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = kPortIndexInput;
+
+    err = mOMX->getParameter(
+            mNode,
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+            &def,
+            sizeof(def));
+
+    if (err != OK) {
+        return err;
+    }
+
+    def.nChannels = numChannels;
+    def.nSampleRate = sampleRate;
+
+    return mOMX->setParameter(
+            mNode,
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+            &def,
+            sizeof(def));
+}
+
 static OMX_AUDIO_AMRBANDMODETYPE pickModeFromBitRate(
         bool isAMRWB, int32_t bps) {
     if (isAMRWB) {
@@ -2546,7 +2599,7 @@
         {
             OMX_AUDIO_PORTDEFINITIONTYPE *audioDef = &def.format.audio;
 
-            switch (audioDef->eEncoding) {
+            switch ((int)audioDef->eEncoding) {
                 case OMX_AUDIO_CodingPCM:
                 {
                     OMX_AUDIO_PARAM_PCMMODETYPE params;
@@ -2652,6 +2705,24 @@
                     break;
                 }
 
+                case OMX_AUDIO_CodingAndroidAC3:
+                {
+                    OMX_AUDIO_PARAM_ANDROID_AC3TYPE params;
+                    InitOMXParams(&params);
+                    params.nPortIndex = kPortIndexOutput;
+
+                    CHECK_EQ((status_t)OK, mOMX->getParameter(
+                            mNode,
+                            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+                            &params,
+                            sizeof(params)));
+
+                    notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AC3);
+                    notify->setInt32("channel-count", params.nChannels);
+                    notify->setInt32("sample-rate", params.nSampleRate);
+                    break;
+                }
+
                 default:
                     TRESPASS();
             }
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index fc6fd9c..97987e2 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -107,6 +107,7 @@
 
 Mutex DataSource::gSnifferMutex;
 List<DataSource::SnifferFunc> DataSource::gSniffers;
+bool DataSource::gSniffersRegistered = false;
 
 bool DataSource::sniff(
         String8 *mimeType, float *confidence, sp<AMessage> *meta) {
@@ -114,7 +115,13 @@
     *confidence = 0.0f;
     meta->clear();
 
-    Mutex::Autolock autoLock(gSnifferMutex);
+    {
+        Mutex::Autolock autoLock(gSnifferMutex);
+        if (!gSniffersRegistered) {
+            return false;
+        }
+    }
+
     for (List<SnifferFunc>::iterator it = gSniffers.begin();
          it != gSniffers.end(); ++it) {
         String8 newMimeType;
@@ -133,9 +140,7 @@
 }
 
 // static
-void DataSource::RegisterSniffer(SnifferFunc func) {
-    Mutex::Autolock autoLock(gSnifferMutex);
-
+void DataSource::RegisterSniffer_l(SnifferFunc func) {
     for (List<SnifferFunc>::iterator it = gSniffers.begin();
          it != gSniffers.end(); ++it) {
         if (*it == func) {
@@ -148,23 +153,29 @@
 
 // static
 void DataSource::RegisterDefaultSniffers() {
-    RegisterSniffer(SniffMPEG4);
-    RegisterSniffer(SniffMatroska);
-    RegisterSniffer(SniffOgg);
-    RegisterSniffer(SniffWAV);
-    RegisterSniffer(SniffFLAC);
-    RegisterSniffer(SniffAMR);
-    RegisterSniffer(SniffMPEG2TS);
-    RegisterSniffer(SniffMP3);
-    RegisterSniffer(SniffAAC);
-    RegisterSniffer(SniffMPEG2PS);
-    RegisterSniffer(SniffWVM);
+    Mutex::Autolock autoLock(gSnifferMutex);
+    if (gSniffersRegistered) {
+        return;
+    }
+
+    RegisterSniffer_l(SniffMPEG4);
+    RegisterSniffer_l(SniffMatroska);
+    RegisterSniffer_l(SniffOgg);
+    RegisterSniffer_l(SniffWAV);
+    RegisterSniffer_l(SniffFLAC);
+    RegisterSniffer_l(SniffAMR);
+    RegisterSniffer_l(SniffMPEG2TS);
+    RegisterSniffer_l(SniffMP3);
+    RegisterSniffer_l(SniffAAC);
+    RegisterSniffer_l(SniffMPEG2PS);
+    RegisterSniffer_l(SniffWVM);
 
     char value[PROPERTY_VALUE_MAX];
     if (property_get("drm.service.enabled", value, NULL)
             && (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
-        RegisterSniffer(SniffDRM);
+        RegisterSniffer_l(SniffDRM);
     }
+    gSniffersRegistered = true;
 }
 
 // static
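
The DataSource change above makes sniffer registration lazy and idempotent: RegisterDefaultSniffers() now takes the lock, registers every sniffer exactly once behind the new gSniffersRegistered flag, and sniff() bails out early when nothing has been registered yet. A minimal standalone sketch of that register-once pattern follows; the types and names below are illustrative stand-ins, not the real Android classes.

#include <cstdio>
#include <list>
#include <mutex>

typedef bool (*SnifferFunc)(const char *data);

static std::mutex gSnifferMutex;
static std::list<SnifferFunc> gSniffers;
static bool gSniffersRegistered = false;

// The "_l" suffix mirrors the patch: the caller must already hold gSnifferMutex.
static void registerSniffer_l(SnifferFunc func) {
    for (std::list<SnifferFunc>::iterator it = gSniffers.begin();
         it != gSniffers.end(); ++it) {
        if (*it == func) {
            return;                       // duplicate registration is a no-op
        }
    }
    gSniffers.push_back(func);
}

static bool sniffExample(const char *) { return false; }  // placeholder sniffer

static void registerDefaultSniffers() {
    std::lock_guard<std::mutex> lock(gSnifferMutex);
    if (gSniffersRegistered) {
        return;                           // later callers return immediately
    }
    registerSniffer_l(sniffExample);
    gSniffersRegistered = true;           // set only after the list is complete
}

static bool sniff(const char *data) {
    {
        std::lock_guard<std::mutex> lock(gSnifferMutex);
        if (!gSniffersRegistered) {
            return false;                 // nothing registered yet
        }
    }
    // The list is append-only and complete once the flag is set, so it can be
    // walked without holding the lock, just as the patched sniff() does.
    for (std::list<SnifferFunc>::iterator it = gSniffers.begin();
         it != gSniffers.end(); ++it) {
        if ((*it)(data)) {
            return true;
        }
    }
    return false;
}

int main() {
    registerDefaultSniffers();
    registerDefaultSniffers();            // second call is a no-op
    std::printf("sniffed: %d\n", sniff("example data") ? 1 : 0);
    return 0;
}
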
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index b5d4e44..340cba7 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -42,6 +42,7 @@
 const char *MEDIA_MIMETYPE_AUDIO_FLAC = "audio/flac";
 const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS = "audio/aac-adts";
 const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
+const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
 
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
 const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 7f56af8..063ab49 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -40,7 +40,9 @@
 #include <utils/Vector.h>
 
 #include <OMX_Audio.h>
+#include <OMX_AudioExt.h>
 #include <OMX_Component.h>
+#include <OMX_IndexExt.h>
 
 #include "include/avc_utils.h"
 
@@ -533,6 +535,17 @@
                     sampleRate,
                     numChannels);
         }
+    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AC3, mMIME)) {
+        int32_t numChannels;
+        int32_t sampleRate;
+        CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+        CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+
+        status_t err = setAC3Format(numChannels, sampleRate);
+        if (err != OK) {
+            CODEC_LOGE("setAC3Format() failed (err = %d)", err);
+            return err;
+        }
     } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_ALAW, mMIME)
             || !strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_MLAW, mMIME)) {
         // These are PCM-like formats with a fixed sample rate but
@@ -1400,6 +1413,10 @@
             "audio_decoder.flac", "audio_encoder.flac" },
         { MEDIA_MIMETYPE_AUDIO_MSGSM,
             "audio_decoder.gsm", "audio_encoder.gsm" },
+        { MEDIA_MIMETYPE_VIDEO_MPEG2,
+            "video_decoder.mpeg2", "video_encoder.mpeg2" },
+        { MEDIA_MIMETYPE_AUDIO_AC3,
+            "audio_decoder.ac3", "audio_encoder.ac3" },
     };
 
     static const size_t kNumMimeToRole =
@@ -3495,6 +3512,31 @@
     return OK;
 }
 
+status_t OMXCodec::setAC3Format(int32_t numChannels, int32_t sampleRate) {
+    OMX_AUDIO_PARAM_ANDROID_AC3TYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = kPortIndexInput;
+
+    status_t err = mOMX->getParameter(
+            mNode,
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+            &def,
+            sizeof(def));
+
+    if (err != OK) {
+        return err;
+    }
+
+    def.nChannels = numChannels;
+    def.nSampleRate = sampleRate;
+
+    return mOMX->setParameter(
+            mNode,
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+            &def,
+            sizeof(def));
+}
+
 void OMXCodec::setG711Format(int32_t numChannels) {
     CHECK(!mIsEncoder);
     setRawAudioFormat(kPortIndexInput, 8000, numChannels);
@@ -4428,6 +4470,17 @@
                 mOutputFormat->setInt32(kKeyChannelCount, numChannels);
                 mOutputFormat->setInt32(kKeySampleRate, sampleRate);
                 mOutputFormat->setInt32(kKeyBitRate, bitRate);
+            } else if (audio_def->eEncoding ==
+                    (OMX_AUDIO_CODINGTYPE)OMX_AUDIO_CodingAndroidAC3) {
+                mOutputFormat->setCString(
+                        kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
+                int32_t numChannels, sampleRate, bitRate;
+                inputFormat->findInt32(kKeyChannelCount, &numChannels);
+                inputFormat->findInt32(kKeySampleRate, &sampleRate);
+                inputFormat->findInt32(kKeyBitRate, &bitRate);
+                mOutputFormat->setInt32(kKeyChannelCount, numChannels);
+                mOutputFormat->setInt32(kKeySampleRate, sampleRate);
+                mOutputFormat->setInt32(kKeyBitRate, bitRate);
             } else {
                 CHECK(!"Should not be here. Unknown audio encoding.");
             }
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index 773854f..e2e6d79 100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -25,7 +25,7 @@
 namespace android {
 
 SkipCutBuffer::SkipCutBuffer(int32_t skip, int32_t cut) {
-    mFrontPadding = skip;
+    mFrontPadding = mSkip = skip;
     mBackPadding = cut;
     mWriteHead = 0;
     mReadHead = 0;
@@ -94,6 +94,7 @@
 
 void SkipCutBuffer::clear() {
     mWriteHead = mReadHead = 0;
+    mFrontPadding = mSkip;
 }
 
 void SkipCutBuffer::write(const char *src, size_t num) {
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 1b20cbb..f842e27 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -58,6 +58,8 @@
       mIsADTS(false),
       mInputBufferCount(0),
       mSignalledError(false),
+      mSawInputEos(false),
+      mSignalledOutputEos(false),
       mAnchorTimeUs(0),
       mNumSamplesOutput(0),
       mOutputPortSettingsChange(NONE) {
@@ -350,115 +352,83 @@
         return;
     }
 
-    while (!inQueue.empty() && !outQueue.empty()) {
-        BufferInfo *inInfo = *inQueue.begin();
-        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+    while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) {
+        BufferInfo *inInfo = NULL;
+        OMX_BUFFERHEADERTYPE *inHeader = NULL;
+        if (!inQueue.empty()) {
+            inInfo = *inQueue.begin();
+            inHeader = inInfo->mHeader;
+        }
 
         BufferInfo *outInfo = *outQueue.begin();
         OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+        outHeader->nFlags = 0;
 
-        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
-            inQueue.erase(inQueue.begin());
-            inInfo->mOwnedByUs = false;
-            notifyEmptyBufferDone(inHeader);
+        if (inHeader) {
+            if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                mSawInputEos = true;
+            }
 
-            if (mDecoderHasData) {
-                // flush out the decoder's delayed data by calling DecodeFrame
-                // one more time, with the AACDEC_FLUSH flag set
-                INT_PCM *outBuffer =
-                        reinterpret_cast<INT_PCM *>(
-                                outHeader->pBuffer + outHeader->nOffset);
+            if (inHeader->nOffset == 0 && inHeader->nFilledLen) {
+                mAnchorTimeUs = inHeader->nTimeStamp;
+                mNumSamplesOutput = 0;
+            }
 
-                AAC_DECODER_ERROR decoderErr =
-                    aacDecoder_DecodeFrame(mAACDecoder,
-                                           outBuffer,
-                                           outHeader->nAllocLen,
-                                           AACDEC_FLUSH);
-                mDecoderHasData = false;
+            if (mIsADTS) {
+                size_t adtsHeaderSize = 0;
+                // skip 30 bits, aac_frame_length follows.
+                // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
 
-                if (decoderErr != AAC_DEC_OK) {
+                const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset;
+
+                bool signalError = false;
+                if (inHeader->nFilledLen < 7) {
+                    ALOGE("Audio data too short to contain even the ADTS header. "
+                          "Got %ld bytes.", inHeader->nFilledLen);
+                    hexdump(adtsHeader, inHeader->nFilledLen);
+                    signalError = true;
+                } else {
+                    bool protectionAbsent = (adtsHeader[1] & 1);
+
+                    unsigned aac_frame_length =
+                        ((adtsHeader[3] & 3) << 11)
+                        | (adtsHeader[4] << 3)
+                        | (adtsHeader[5] >> 5);
+
+                    if (inHeader->nFilledLen < aac_frame_length) {
+                        ALOGE("Not enough audio data for the complete frame. "
+                              "Got %ld bytes, frame size according to the ADTS "
+                              "header is %u bytes.",
+                              inHeader->nFilledLen, aac_frame_length);
+                        hexdump(adtsHeader, inHeader->nFilledLen);
+                        signalError = true;
+                    } else {
+                        adtsHeaderSize = (protectionAbsent ? 7 : 9);
+
+                        inBuffer[0] = (UCHAR *)adtsHeader + adtsHeaderSize;
+                        inBufferLength[0] = aac_frame_length - adtsHeaderSize;
+
+                        inHeader->nOffset += adtsHeaderSize;
+                        inHeader->nFilledLen -= adtsHeaderSize;
+                    }
+                }
+
+                if (signalError) {
                     mSignalledError = true;
 
-                    notify(OMX_EventError, OMX_ErrorUndefined, decoderErr,
+                    notify(OMX_EventError,
+                           OMX_ErrorStreamCorrupt,
+                           ERROR_MALFORMED,
                            NULL);
 
                     return;
                 }
-
-                outHeader->nFilledLen =
-                        mStreamInfo->frameSize
-                            * sizeof(int16_t)
-                            * mStreamInfo->numChannels;
             } else {
-                // we never submitted any data to the decoder, so there's nothing to flush out
-                outHeader->nFilledLen = 0;
-            }
-
-            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
-
-            outQueue.erase(outQueue.begin());
-            outInfo->mOwnedByUs = false;
-            notifyFillBufferDone(outHeader);
-            return;
-        }
-
-        if (inHeader->nOffset == 0) {
-            mAnchorTimeUs = inHeader->nTimeStamp;
-            mNumSamplesOutput = 0;
-        }
-
-        size_t adtsHeaderSize = 0;
-        if (mIsADTS) {
-            // skip 30 bits, aac_frame_length follows.
-            // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
-
-            const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset;
-
-            bool signalError = false;
-            if (inHeader->nFilledLen < 7) {
-                ALOGE("Audio data too short to contain even the ADTS header. "
-                      "Got %ld bytes.", inHeader->nFilledLen);
-                hexdump(adtsHeader, inHeader->nFilledLen);
-                signalError = true;
-            } else {
-                bool protectionAbsent = (adtsHeader[1] & 1);
-
-                unsigned aac_frame_length =
-                    ((adtsHeader[3] & 3) << 11)
-                    | (adtsHeader[4] << 3)
-                    | (adtsHeader[5] >> 5);
-
-                if (inHeader->nFilledLen < aac_frame_length) {
-                    ALOGE("Not enough audio data for the complete frame. "
-                          "Got %ld bytes, frame size according to the ADTS "
-                          "header is %u bytes.",
-                          inHeader->nFilledLen, aac_frame_length);
-                    hexdump(adtsHeader, inHeader->nFilledLen);
-                    signalError = true;
-                } else {
-                    adtsHeaderSize = (protectionAbsent ? 7 : 9);
-
-                    inBuffer[0] = (UCHAR *)adtsHeader + adtsHeaderSize;
-                    inBufferLength[0] = aac_frame_length - adtsHeaderSize;
-
-                    inHeader->nOffset += adtsHeaderSize;
-                    inHeader->nFilledLen -= adtsHeaderSize;
-                }
-            }
-
-            if (signalError) {
-                mSignalledError = true;
-
-                notify(OMX_EventError,
-                       OMX_ErrorStreamCorrupt,
-                       ERROR_MALFORMED,
-                       NULL);
-
-                return;
+                inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
+                inBufferLength[0] = inHeader->nFilledLen;
             }
         } else {
-            inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
-            inBufferLength[0] = inHeader->nFilledLen;
+            inBufferLength[0] = 0;
         }
 
         // Fill and decode
@@ -471,50 +441,66 @@
         int prevNumChannels = mStreamInfo->numChannels;
 
         AAC_DECODER_ERROR decoderErr = AAC_DEC_NOT_ENOUGH_BITS;
-        while (bytesValid[0] > 0 && decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
+        while ((bytesValid[0] > 0 || mSawInputEos) && decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
+            mDecoderHasData |= (bytesValid[0] > 0);
             aacDecoder_Fill(mAACDecoder,
                             inBuffer,
                             inBufferLength,
                             bytesValid);
-            mDecoderHasData = true;
 
             decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
                                                 outBuffer,
                                                 outHeader->nAllocLen,
                                                 0 /* flags */);
-
             if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
-                ALOGW("Not enough bits, bytesValid %d", bytesValid[0]);
+                if (mSawInputEos && bytesValid[0] <= 0) {
+                    if (mDecoderHasData) {
+                        // flush out the decoder's delayed data by calling DecodeFrame
+                        // one more time, with the AACDEC_FLUSH flag set
+                        decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
+                                                            outBuffer,
+                                                            outHeader->nAllocLen,
+                                                            AACDEC_FLUSH);
+                        mDecoderHasData = false;
+                    }
+                    outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+                    mSignalledOutputEos = true;
+                    break;
+                } else {
+                    ALOGW("Not enough bits, bytesValid %d", bytesValid[0]);
+                }
             }
         }
 
         size_t numOutBytes =
             mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;
 
-        if (decoderErr == AAC_DEC_OK) {
-            UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
-            inHeader->nFilledLen -= inBufferUsedLength;
-            inHeader->nOffset += inBufferUsedLength;
-        } else {
-            ALOGW("AAC decoder returned error %d, substituting silence",
-                  decoderErr);
+        if (inHeader) {
+            if (decoderErr == AAC_DEC_OK) {
+                UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
+                inHeader->nFilledLen -= inBufferUsedLength;
+                inHeader->nOffset += inBufferUsedLength;
+            } else {
+                ALOGW("AAC decoder returned error %d, substituting silence",
+                      decoderErr);
 
-            memset(outHeader->pBuffer + outHeader->nOffset, 0, numOutBytes);
+                memset(outHeader->pBuffer + outHeader->nOffset, 0, numOutBytes);
 
-            // Discard input buffer.
-            inHeader->nFilledLen = 0;
+                // Discard input buffer.
+                inHeader->nFilledLen = 0;
 
-            aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
+                aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
 
-            // fall through
-        }
+                // fall through
+            }
 
-        if (inHeader->nFilledLen == 0) {
-            inInfo->mOwnedByUs = false;
-            inQueue.erase(inQueue.begin());
-            inInfo = NULL;
-            notifyEmptyBufferDone(inHeader);
-            inHeader = NULL;
+            if (inHeader->nFilledLen == 0) {
+                inInfo->mOwnedByUs = false;
+                inQueue.erase(inQueue.begin());
+                inInfo = NULL;
+                notifyEmptyBufferDone(inHeader);
+                inHeader = NULL;
+            }
         }
 
         /*
@@ -555,7 +541,6 @@
             // we've previously decoded valid data, in the latter case
             // (decode failed) we'll output a silent frame.
             outHeader->nFilledLen = numOutBytes;
-            outHeader->nFlags = 0;
 
             outHeader->nTimeStamp =
                 mAnchorTimeUs
@@ -582,6 +567,12 @@
         // depend on fragments from the last one decoded.
         // drain all existing data
         drainDecoder();
+        // force decoder loop to drop the first decoded buffer by resetting these state variables,
+        // but only if initialization has already happened.
+        if (mInputBufferCount != 0) {
+            mInputBufferCount = 1;
+            mStreamInfo->sampleRate = 0;
+        }
     }
 }
 
@@ -606,6 +597,8 @@
     mStreamInfo->sampleRate = 0;
 
     mSignalledError = false;
+    mSawInputEos = false;
+    mSignalledOutputEos = false;
     mOutputPortSettingsChange = NONE;
 }
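
The reworked SoftAAC2 loop keeps the same ADTS header arithmetic as before, now nested under the inHeader != NULL branch. As a standalone illustration of that arithmetic (not part of the patch): the 13-bit aac_frame_length spans bytes 3..5 of the header, and the header itself is 7 bytes, or 9 when a CRC follows (protection_absent == 0). The header bytes used in main() are made up for the example.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Returns the ADTS frame length in bytes, or 0 if fewer than 7 bytes are available.
static unsigned adtsFrameLength(const uint8_t *hdr, size_t size) {
    if (size < 7) {
        return 0;
    }
    return ((hdr[3] & 3) << 11) | (hdr[4] << 3) | (hdr[5] >> 5);
}

// 7-byte header without CRC, 9-byte header when protection_absent is 0.
static unsigned adtsHeaderSize(const uint8_t *hdr) {
    bool protectionAbsent = (hdr[1] & 1);
    return protectionAbsent ? 7 : 9;
}

int main() {
    // Hypothetical header: syncword 0xFFF1 (protection absent), frame length 0x100 = 256 bytes.
    const uint8_t hdr[7] = { 0xFF, 0xF1, 0x50, 0x40, 0x20, 0x00, 0xFC };
    std::printf("frame %u bytes, header %u bytes\n",
                adtsFrameLength(hdr, sizeof(hdr)), adtsHeaderSize(hdr));
    return 0;
}
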
 
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h
index 2d960ab..a7ea1e2 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.h
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h
@@ -55,6 +55,8 @@
     bool mDecoderHasData;
     size_t mInputBufferCount;
     bool mSignalledError;
+    bool mSawInputEos;
+    bool mSignalledOutputEos;
     int64_t mAnchorTimeUs;
     int64_t mNumSamplesOutput;
 
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 7c382fb..877e3cb 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -49,6 +49,8 @@
       mNumChannels(2),
       mSamplingRate(44100),
       mSignalledError(false),
+      mSawInputEos(false),
+      mSignalledOutputEos(false),
       mOutputPortSettingsChange(NONE) {
     initPorts();
     initDecoder();
@@ -194,48 +196,36 @@
     List<BufferInfo *> &inQueue = getPortQueue(0);
     List<BufferInfo *> &outQueue = getPortQueue(1);
 
-    while (!inQueue.empty() && !outQueue.empty()) {
-        BufferInfo *inInfo = *inQueue.begin();
-        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+    while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) {
+        BufferInfo *inInfo = NULL;
+        OMX_BUFFERHEADERTYPE *inHeader = NULL;
+        if (!inQueue.empty()) {
+            inInfo = *inQueue.begin();
+            inHeader = inInfo->mHeader;
+        }
 
         BufferInfo *outInfo = *outQueue.begin();
         OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+        outHeader->nFlags = 0;
 
-        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
-            inQueue.erase(inQueue.begin());
-            inInfo->mOwnedByUs = false;
-            notifyEmptyBufferDone(inHeader);
-
-            if (!mIsFirst) {
-                // pad the end of the stream with 529 samples, since that many samples
-                // were trimmed off the beginning when decoding started
-                outHeader->nFilledLen =
-                    kPVMP3DecoderDelay * mNumChannels * sizeof(int16_t);
-
-                memset(outHeader->pBuffer, 0, outHeader->nFilledLen);
-            } else {
-                // Since we never discarded frames from the start, we won't have
-                // to add any padding at the end either.
-                outHeader->nFilledLen = 0;
+        if (inHeader) {
+            if (inHeader->nOffset == 0 && inHeader->nFilledLen) {
+                mAnchorTimeUs = inHeader->nTimeStamp;
+                mNumFramesOutput = 0;
             }
 
-            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+            if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                mSawInputEos = true;
+            }
 
-            outQueue.erase(outQueue.begin());
-            outInfo->mOwnedByUs = false;
-            notifyFillBufferDone(outHeader);
-            return;
+            mConfig->pInputBuffer =
+                inHeader->pBuffer + inHeader->nOffset;
+
+            mConfig->inputBufferCurrentLength = inHeader->nFilledLen;
+        } else {
+            mConfig->pInputBuffer = NULL;
+            mConfig->inputBufferCurrentLength = 0;
         }
-
-        if (inHeader->nOffset == 0) {
-            mAnchorTimeUs = inHeader->nTimeStamp;
-            mNumFramesOutput = 0;
-        }
-
-        mConfig->pInputBuffer =
-            inHeader->pBuffer + inHeader->nOffset;
-
-        mConfig->inputBufferCurrentLength = inHeader->nFilledLen;
         mConfig->inputBufferMaxLength = 0;
         mConfig->inputBufferUsedLength = 0;
 
@@ -262,13 +252,28 @@
                 mConfig->outputFrameSize = kOutputBufferSize / sizeof(int16_t);
             }
 
-            // This is recoverable, just ignore the current frame and
-            // play silence instead.
-            memset(outHeader->pBuffer,
-                   0,
-                   mConfig->outputFrameSize * sizeof(int16_t));
+            if (decoderErr == NO_ENOUGH_MAIN_DATA_ERROR && mSawInputEos) {
+                if (!mIsFirst) {
+                    // pad the end of the stream with 529 samples, since that many samples
+                    // were trimmed off the beginning when decoding started
+                    outHeader->nOffset = 0;
+                    outHeader->nFilledLen = kPVMP3DecoderDelay * mNumChannels * sizeof(int16_t);
 
-            mConfig->inputBufferUsedLength = inHeader->nFilledLen;
+                    memset(outHeader->pBuffer, 0, outHeader->nFilledLen);
+                }
+                outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+                mSignalledOutputEos = true;
+            } else {
+                // This is recoverable, just ignore the current frame and
+                // play silence instead.
+                memset(outHeader->pBuffer,
+                       0,
+                       mConfig->outputFrameSize * sizeof(int16_t));
+
+                if (inHeader) {
+                    mConfig->inputBufferUsedLength = inHeader->nFilledLen;
+                }
+            }
         } else if (mConfig->samplingRate != mSamplingRate
                 || mConfig->num_channels != mNumChannels) {
             mSamplingRate = mConfig->samplingRate;
@@ -289,7 +294,7 @@
 
             outHeader->nFilledLen =
                 mConfig->outputFrameSize * sizeof(int16_t) - outHeader->nOffset;
-        } else {
+        } else if (!mSignalledOutputEos) {
             outHeader->nOffset = 0;
             outHeader->nFilledLen = mConfig->outputFrameSize * sizeof(int16_t);
         }
@@ -298,23 +303,24 @@
             mAnchorTimeUs
                 + (mNumFramesOutput * 1000000ll) / mConfig->samplingRate;
 
-        outHeader->nFlags = 0;
+        if (inHeader) {
+            CHECK_GE(inHeader->nFilledLen, mConfig->inputBufferUsedLength);
 
-        CHECK_GE(inHeader->nFilledLen, mConfig->inputBufferUsedLength);
+            inHeader->nOffset += mConfig->inputBufferUsedLength;
+            inHeader->nFilledLen -= mConfig->inputBufferUsedLength;
 
-        inHeader->nOffset += mConfig->inputBufferUsedLength;
-        inHeader->nFilledLen -= mConfig->inputBufferUsedLength;
+
+            if (inHeader->nFilledLen == 0) {
+                inInfo->mOwnedByUs = false;
+                inQueue.erase(inQueue.begin());
+                inInfo = NULL;
+                notifyEmptyBufferDone(inHeader);
+                inHeader = NULL;
+            }
+        }
 
         mNumFramesOutput += mConfig->outputFrameSize / mNumChannels;
 
-        if (inHeader->nFilledLen == 0) {
-            inInfo->mOwnedByUs = false;
-            inQueue.erase(inQueue.begin());
-            inInfo = NULL;
-            notifyEmptyBufferDone(inHeader);
-            inHeader = NULL;
-        }
-
         outInfo->mOwnedByUs = false;
         outQueue.erase(outQueue.begin());
         outInfo = NULL;
@@ -362,6 +368,8 @@
     pvmp3_InitDecoder(mConfig, mDecoderBuf);
     mIsFirst = true;
     mSignalledError = false;
+    mSawInputEos = false;
+    mSignalledOutputEos = false;
     mOutputPortSettingsChange = NONE;
 }
 
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.h b/media/libstagefright/codecs/mp3dec/SoftMP3.h
index 4af91ea..f9e7b53 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.h
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.h
@@ -61,6 +61,8 @@
 
     bool mIsFirst;
     bool mSignalledError;
+    bool mSawInputEos;
+    bool mSignalledOutputEos;
 
     enum {
         NONE,
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 51bb958..515e4d3 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -54,6 +54,8 @@
       mAnchorTimeUs(0),
       mNumFramesOutput(0),
       mNumFramesLeftOnPage(-1),
+      mSawInputEos(false),
+      mSignalledOutputEos(false),
       mOutputPortSettingsChange(NONE) {
     initPorts();
     CHECK_EQ(initDecoder(), (status_t)OK);
@@ -290,48 +292,47 @@
         return;
     }
 
-    while (!inQueue.empty() && !outQueue.empty()) {
-        BufferInfo *inInfo = *inQueue.begin();
-        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+    while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) {
+        BufferInfo *inInfo = NULL;
+        OMX_BUFFERHEADERTYPE *inHeader = NULL;
+        if (!inQueue.empty()) {
+            inInfo = *inQueue.begin();
+            inHeader = inInfo->mHeader;
+        }
 
         BufferInfo *outInfo = *outQueue.begin();
         OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
 
-        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
-            inQueue.erase(inQueue.begin());
-            inInfo->mOwnedByUs = false;
-            notifyEmptyBufferDone(inHeader);
+        int32_t numPageSamples = 0;
 
-            outHeader->nFilledLen = 0;
-            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+        if (inHeader) {
+            if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                mSawInputEos = true;
+            }
 
-            outQueue.erase(outQueue.begin());
-            outInfo->mOwnedByUs = false;
-            notifyFillBufferDone(outHeader);
-            return;
+            if (inHeader->nFilledLen || !mSawInputEos) {
+                CHECK_GE(inHeader->nFilledLen, sizeof(numPageSamples));
+                memcpy(&numPageSamples,
+                       inHeader->pBuffer
+                        + inHeader->nOffset + inHeader->nFilledLen - 4,
+                       sizeof(numPageSamples));
+
+                if (inHeader->nOffset == 0) {
+                    mAnchorTimeUs = inHeader->nTimeStamp;
+                    mNumFramesOutput = 0;
+                }
+
+                inHeader->nFilledLen -= sizeof(numPageSamples);;
+            }
         }
 
-        int32_t numPageSamples;
-        CHECK_GE(inHeader->nFilledLen, sizeof(numPageSamples));
-        memcpy(&numPageSamples,
-               inHeader->pBuffer
-                + inHeader->nOffset + inHeader->nFilledLen - 4,
-               sizeof(numPageSamples));
-
         if (numPageSamples >= 0) {
             mNumFramesLeftOnPage = numPageSamples;
         }
 
-        if (inHeader->nOffset == 0) {
-            mAnchorTimeUs = inHeader->nTimeStamp;
-            mNumFramesOutput = 0;
-        }
-
-        inHeader->nFilledLen -= sizeof(numPageSamples);;
-
         ogg_buffer buf;
-        buf.data = inHeader->pBuffer + inHeader->nOffset;
-        buf.size = inHeader->nFilledLen;
+        buf.data = inHeader ? inHeader->pBuffer + inHeader->nOffset : NULL;
+        buf.size = inHeader ? inHeader->nFilledLen : 0;
         buf.refcount = 1;
         buf.ptr.owner = NULL;
 
@@ -351,6 +352,7 @@
 
         int numFrames = 0;
 
+        outHeader->nFlags = 0;
         int err = vorbis_dsp_synthesis(mState, &pack, 1);
         if (err != 0) {
             ALOGW("vorbis_dsp_synthesis returned %d", err);
@@ -370,13 +372,16 @@
                 ALOGV("discarding %d frames at end of page",
                      numFrames - mNumFramesLeftOnPage);
                 numFrames = mNumFramesLeftOnPage;
+                if (mSawInputEos) {
+                    outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+                    mSignalledOutputEos = true;
+                }
             }
             mNumFramesLeftOnPage -= numFrames;
         }
 
         outHeader->nFilledLen = numFrames * sizeof(int16_t) * mVi->channels;
         outHeader->nOffset = 0;
-        outHeader->nFlags = 0;
 
         outHeader->nTimeStamp =
             mAnchorTimeUs
@@ -384,11 +389,13 @@
 
         mNumFramesOutput += numFrames;
 
-        inInfo->mOwnedByUs = false;
-        inQueue.erase(inQueue.begin());
-        inInfo = NULL;
-        notifyEmptyBufferDone(inHeader);
-        inHeader = NULL;
+        if (inHeader) {
+            inInfo->mOwnedByUs = false;
+            inQueue.erase(inQueue.begin());
+            inInfo = NULL;
+            notifyEmptyBufferDone(inHeader);
+            inHeader = NULL;
+        }
 
         outInfo->mOwnedByUs = false;
         outQueue.erase(outQueue.begin());
@@ -425,6 +432,8 @@
         mVi = NULL;
     }
 
+    mSawInputEos = false;
+    mSignalledOutputEos = false;
     mOutputPortSettingsChange = NONE;
 }
 
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
index cb628a0..1d00816 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
@@ -59,6 +59,8 @@
     int64_t mAnchorTimeUs;
     int64_t mNumFramesOutput;
     int32_t mNumFramesLeftOnPage;
+    bool mSawInputEos;
+    bool mSignalledOutputEos;
 
     enum {
         NONE,
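
SoftAAC2, SoftMP3, and SoftVorbis all receive the same reshaping: instead of returning as soon as an input buffer carries OMX_BUFFERFLAG_EOS, each loop now keeps iterating with a NULL input buffer until the decoder's delayed data has been drained and an output buffer has been tagged with EOS, tracked by the new mSawInputEos / mSignalledOutputEos flags. The following toy simulation (illustrative only, with made-up buffer contents) shows the shape of that loop condition.

#include <cstdio>
#include <deque>

int main() {
    std::deque<int> inQueue;              // pretend input buffers; the last one carries EOS
    inQueue.push_back(1);
    inQueue.push_back(2);
    std::deque<int> outQueue(4, 0);       // pretend output buffers
    bool sawInputEos = false;
    bool signalledOutputEos = false;
    int delayedFrames = 1;                // data still buffered inside the decoder

    while ((!inQueue.empty() || (sawInputEos && !signalledOutputEos))
            && !outQueue.empty()) {
        const bool haveInput = !inQueue.empty();
        if (haveInput && inQueue.front() == 2) {
            sawInputEos = true;           // EOS flag arrives on the last input buffer
        }
        if (!haveInput) {
            if (delayedFrames > 0) {
                --delayedFrames;          // flush the decoder's delayed data first
            } else {
                signalledOutputEos = true; // drained: tag this output buffer with EOS
            }
        }
        std::printf("pass: input=%d outputEos=%d\n",
                    haveInput ? 1 : 0, signalledOutputEos ? 1 : 0);
        if (haveInput) {
            inQueue.pop_front();          // input buffer consumed
        }
        outQueue.pop_front();             // one output buffer produced per pass
    }
    return 0;
}
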
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 9850a46..f87b9da 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -506,6 +506,11 @@
                     ElementaryStreamQueue::PCM_AUDIO);
             break;
 
+        case STREAMTYPE_AC3:
+            mQueue = new ElementaryStreamQueue(
+                    ElementaryStreamQueue::AC3);
+            break;
+
         default:
             break;
     }
@@ -614,6 +619,7 @@
         case STREAMTYPE_MPEG2_AUDIO:
         case STREAMTYPE_MPEG2_AUDIO_ADTS:
         case STREAMTYPE_PCM_AUDIO:
+        case STREAMTYPE_AC3:
             return true;
 
         default:
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index a10edc9..d4e30b4 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -88,6 +88,10 @@
         STREAMTYPE_MPEG2_AUDIO_ADTS     = 0x0f,
         STREAMTYPE_MPEG4_VIDEO          = 0x10,
         STREAMTYPE_H264                 = 0x1b,
+
+        // From ATSC A/53 Part 3:2009, 6.7.1
+        STREAMTYPE_AC3                  = 0x81,
+
         STREAMTYPE_PCM_AUDIO            = 0x83,
     };
 
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 8f9c9c8..ea79885 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -56,6 +56,122 @@
     }
 }
 
+// Parse AC3 header assuming the current ptr is the start position of a syncframe,
+// update metadata only if applicable, and return the payload size
+static unsigned parseAC3SyncFrame(
+        const uint8_t *ptr, size_t size, sp<MetaData> *metaData) {
+    static const unsigned channelCountTable[] = {2, 1, 2, 3, 4, 4, 5, 6};
+    static const unsigned samplingRateTable[] = {48000, 44100, 32000};
+    static const unsigned rates[] = {32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256,
+            320, 384, 448, 512, 576, 640};
+
+    static const unsigned frameSizeTable[19][3] = {
+        { 64, 69, 96 },
+        { 80, 87, 120 },
+        { 96, 104, 144 },
+        { 112, 121, 168 },
+        { 128, 139, 192 },
+        { 160, 174, 240 },
+        { 192, 208, 288 },
+        { 224, 243, 336 },
+        { 256, 278, 384 },
+        { 320, 348, 480 },
+        { 384, 417, 576 },
+        { 448, 487, 672 },
+        { 512, 557, 768 },
+        { 640, 696, 960 },
+        { 768, 835, 1152 },
+        { 896, 975, 1344 },
+        { 1024, 1114, 1536 },
+        { 1152, 1253, 1728 },
+        { 1280, 1393, 1920 },
+    };
+
+    ABitReader bits(ptr, size);
+    unsigned syncStartPos = 0;  // in bytes
+    if (bits.numBitsLeft() < 16) {
+        return 0;
+    }
+    if (bits.getBits(16) != 0x0B77) {
+        return 0;
+    }
+
+    if (bits.numBitsLeft() < 16 + 2 + 6 + 5 + 3 + 3) {
+        ALOGV("Not enough bits left for further parsing");
+        return 0;
+    }
+    bits.skipBits(16);  // crc1
+
+    unsigned fscod = bits.getBits(2);
+    if (fscod == 3) {
+        ALOGW("Incorrect fscod in AC3 header");
+        return 0;
+    }
+
+    unsigned frmsizecod = bits.getBits(6);
+    if (frmsizecod > 37) {
+        ALOGW("Incorrect frmsizecod in AC3 header");
+        return 0;
+    }
+
+    unsigned bsid = bits.getBits(5);
+    if (bsid > 8) {
+        ALOGW("Incorrect bsid in AC3 header. Possibly E-AC-3?");
+        return 0;
+    }
+
+    unsigned bsmod = bits.getBits(3);
+    unsigned acmod = bits.getBits(3);
+    unsigned cmixlev = 0;
+    unsigned surmixlev = 0;
+    unsigned dsurmod = 0;
+
+    if ((acmod & 1) > 0 && acmod != 1) {
+        if (bits.numBitsLeft() < 2) {
+            return 0;
+        }
+        cmixlev = bits.getBits(2);
+    }
+    if ((acmod & 4) > 0) {
+        if (bits.numBitsLeft() < 2) {
+            return 0;
+        }
+        surmixlev = bits.getBits(2);
+    }
+    if (acmod == 2) {
+        if (bits.numBitsLeft() < 2) {
+            return 0;
+        }
+        dsurmod = bits.getBits(2);
+    }
+
+    if (bits.numBitsLeft() < 1) {
+        return 0;
+    }
+    unsigned lfeon = bits.getBits(1);
+
+    unsigned samplingRate = samplingRateTable[fscod];
+    unsigned payloadSize = frameSizeTable[frmsizecod >> 1][fscod];
+    if (fscod == 1) {
+        payloadSize += frmsizecod & 1;
+    }
+    payloadSize <<= 1;  // convert from 16-bit words to bytes
+
+    unsigned channelCount = channelCountTable[acmod] + lfeon;
+
+    if (metaData != NULL) {
+        (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
+        (*metaData)->setInt32(kKeyChannelCount, channelCount);
+        (*metaData)->setInt32(kKeySampleRate, samplingRate);
+    }
+
+    return payloadSize;
+}
+
+static bool IsSeeminglyValidAC3Header(const uint8_t *ptr, size_t size) {
+    return parseAC3SyncFrame(ptr, size, NULL) > 0;
+}
+
 static bool IsSeeminglyValidADTSHeader(const uint8_t *ptr, size_t size) {
     if (size < 3) {
         // Not enough data to verify header.
@@ -224,6 +340,33 @@
                 break;
             }
 
+            case AC3:
+            {
+                uint8_t *ptr = (uint8_t *)data;
+
+                ssize_t startOffset = -1;
+                for (size_t i = 0; i < size; ++i) {
+                    if (IsSeeminglyValidAC3Header(&ptr[i], size - i)) {
+                        startOffset = i;
+                        break;
+                    }
+                }
+
+                if (startOffset < 0) {
+                    return ERROR_MALFORMED;
+                }
+
+                if (startOffset > 0) {
+                    ALOGI("found something resembling an AC3 syncword at "
+                          "offset %d",
+                          startOffset);
+                }
+
+                data = &ptr[startOffset];
+                size -= startOffset;
+                break;
+            }
+
             case MPEG_AUDIO:
             {
                 uint8_t *ptr = (uint8_t *)data;
@@ -328,6 +471,8 @@
             return dequeueAccessUnitH264();
         case AAC:
             return dequeueAccessUnitAAC();
+        case AC3:
+            return dequeueAccessUnitAC3();
         case MPEG_VIDEO:
             return dequeueAccessUnitMPEGVideo();
         case MPEG4_VIDEO:
@@ -340,6 +485,51 @@
     }
 }
 
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAC3() {
+    unsigned syncStartPos = 0;  // in bytes
+    unsigned payloadSize = 0;
+    sp<MetaData> format = new MetaData;
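+    // scan the buffered stream data for the next parseable AC3 sync frame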
+    while (true) {
+        if (syncStartPos + 2 >= mBuffer->size()) {
+            return NULL;
+        }
+
+        payloadSize = parseAC3SyncFrame(
+                mBuffer->data() + syncStartPos,
+                mBuffer->size() - syncStartPos,
+                &format);
+        if (payloadSize > 0) {
+            break;
+        }
+        ++syncStartPos;
+    }
+
+    if (mBuffer->size() < syncStartPos + payloadSize) {
+        ALOGV("Not enough buffer size for AC3");
+        return NULL;
+    }
+
+    if (mFormat == NULL) {
+        mFormat = format;
+    }
+
+    sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
+    memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
+
+    int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
+    CHECK_GE(timeUs, 0ll);
+    accessUnit->meta()->setInt64("timeUs", timeUs);
+
+    memmove(
+            mBuffer->data(),
+            mBuffer->data() + syncStartPos + payloadSize,
+            mBuffer->size() - syncStartPos - payloadSize);
+
+    mBuffer->setRange(0, mBuffer->size() - syncStartPos - payloadSize);
+
+    return accessUnit;
+}
+
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitPCMAudio() {
     if (mBuffer->size() < 4) {
         return NULL;
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index 66a8087..a2cca77 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -32,6 +32,7 @@
     enum Mode {
         H264,
         AAC,
+        AC3,
         MPEG_AUDIO,
         MPEG_VIDEO,
         MPEG4_VIDEO,
@@ -67,6 +68,7 @@
 
     sp<ABuffer> dequeueAccessUnitH264();
     sp<ABuffer> dequeueAccessUnitAAC();
+    sp<ABuffer> dequeueAccessUnitAC3();
     sp<ABuffer> dequeueAccessUnitMPEGAudio();
     sp<ABuffer> dequeueAccessUnitMPEGVideo();
     sp<ABuffer> dequeueAccessUnitMPEG4Video();
diff --git a/media/libstagefright/timedtext/test/Android.mk b/media/libstagefright/timedtext/test/Android.mk
index a5e7ba2..9a9fde2 100644
--- a/media/libstagefright/timedtext/test/Android.mk
+++ b/media/libstagefright/timedtext/test/Android.mk
@@ -2,7 +2,6 @@
 
 # ================================================================
 # Unit tests for libstagefright_timedtext
-# See also /development/testrunner/test_defs.xml
 # ================================================================
 
 # ================================================================
@@ -18,10 +17,13 @@
 
 LOCAL_C_INCLUDES := \
     $(TOP)/external/expat/lib \
-    $(TOP)/frameworks/base/media/libstagefright/timedtext
+    $(TOP)/frameworks/av/media/libstagefright/timedtext
 
 LOCAL_SHARED_LIBRARIES := \
+    libbinder \
     libexpat \
-    libstagefright
+    libstagefright \
+    libstagefright_foundation \
+    libutils
 
 include $(BUILD_NATIVE_TEST)
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 3132e54..c9c9f8a 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -162,12 +162,15 @@
         (void) property_get("af.tee", value, "0");
         teeEnabled = atoi(value);
     }
-    if (teeEnabled & 1)
+    if (teeEnabled & 1) {
         mTeeSinkInputEnabled = true;
-    if (teeEnabled & 2)
+    }
+    if (teeEnabled & 2) {
         mTeeSinkOutputEnabled = true;
-    if (teeEnabled & 4)
+    }
+    if (teeEnabled & 4) {
         mTeeSinkTrackEnabled = true;
+    }
 #endif
 }
 
@@ -513,10 +516,12 @@
 
         track = thread->createTrack_l(client, streamType, sampleRate, format,
                 channelMask, frameCount, sharedBuffer, lSessionId, flags, tid, clientUid, &lStatus);
+        // we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless
 
         // move effect chain to this output thread if an effect on same session was waiting
         // for a track to be created
         if (lStatus == NO_ERROR && effectThread != NULL) {
+            // no risk of deadlock because AudioFlinger::mLock is held
             Mutex::Autolock _dl(thread->mLock);
             Mutex::Autolock _sl(effectThread->mLock);
             moveEffectChain_l(lSessionId, effectThread, thread, true);
@@ -536,7 +541,9 @@
                 }
             }
         }
+
     }
+
     if (lStatus == NO_ERROR) {
         // s for server's pid, n for normal mixer name, f for fast index
         name = String8::format("s:%d;n:%d;f:%d", getpid_cached, track->name() - AudioMixer::TRACK0,
@@ -550,9 +557,7 @@
     }
 
 Exit:
-    if (status != NULL) {
-        *status = lStatus;
-    }
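+    // status is documented as non-NULL in AudioFlinger.h, so no NULL check is needed here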
+    *status = lStatus;
     return trackHandle;
 }
 
@@ -1293,6 +1298,7 @@
                                                   flags, tid, &lStatus);
         LOG_ALWAYS_FATAL_IF((recordTrack != 0) != (lStatus == NO_ERROR));
     }
+
     if (lStatus != NO_ERROR) {
         // remove local strong reference to Client before deleting the RecordTrack so that the
         // Client destructor is called by the TrackBase destructor with mLock held
@@ -1301,14 +1307,11 @@
         goto Exit;
     }
 
-    // return to handle to client
+    // return handle to client
     recordHandle = new RecordHandle(recordTrack);
-    lStatus = NO_ERROR;
 
 Exit:
-    if (status) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return recordHandle;
 }
 
@@ -1449,18 +1452,15 @@
                                            audio_output_flags_t flags,
                                            const audio_offload_info_t *offloadInfo)
 {
-    PlaybackThread *thread = NULL;
     struct audio_config config;
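+    // zero-initialize so that fields not explicitly set below (such as offload_info) are defined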
+    memset(&config, 0, sizeof(config));
     config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
     config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
     config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
-    if (offloadInfo) {
+    if (offloadInfo != NULL) {
         config.offload_info = *offloadInfo;
     }
 
-    audio_stream_out_t *outStream = NULL;
-    AudioHwDevice *outHwDev;
-
     ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
               module,
               (pDevices != NULL) ? *pDevices : 0,
@@ -1469,7 +1469,7 @@
               config.channel_mask,
               flags);
     ALOGV("openOutput(), offloadInfo %p version 0x%04x",
-          offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version );
+          offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version);
 
     if (pDevices == NULL || *pDevices == 0) {
         return 0;
@@ -1477,15 +1477,17 @@
 
     Mutex::Autolock _l(mLock);
 
-    outHwDev = findSuitableHwDev_l(module, *pDevices);
-    if (outHwDev == NULL)
+    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, *pDevices);
+    if (outHwDev == NULL) {
         return 0;
+    }
 
     audio_hw_device_t *hwDevHal = outHwDev->hwDevice();
     audio_io_handle_t id = nextUniqueId();
 
     mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
 
+    audio_stream_out_t *outStream = NULL;
     status_t status = hwDevHal->open_output_stream(hwDevHal,
                                           id,
                                           *pDevices,
@@ -1505,6 +1507,7 @@
     if (status == NO_ERROR && outStream != NULL) {
         AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags);
 
+        PlaybackThread *thread;
         if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
             thread = new OffloadThread(this, output, id, *pDevices);
             ALOGV("openOutput() created offload output: ID %d thread %p", id, thread);
@@ -1672,18 +1675,15 @@
                                           audio_format_t *pFormat,
                                           audio_channel_mask_t *pChannelMask)
 {
-    status_t status;
-    RecordThread *thread = NULL;
     struct audio_config config;
+    memset(&config, 0, sizeof(config));
     config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
     config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
     config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
 
     uint32_t reqSamplingRate = config.sample_rate;
     audio_format_t reqFormat = config.format;
-    audio_channel_mask_t reqChannels = config.channel_mask;
-    audio_stream_in_t *inStream = NULL;
-    AudioHwDevice *inHwDev;
+    audio_channel_mask_t reqChannelMask = config.channel_mask;
 
     if (pDevices == NULL || *pDevices == 0) {
         return 0;
@@ -1691,14 +1691,16 @@
 
     Mutex::Autolock _l(mLock);
 
-    inHwDev = findSuitableHwDev_l(module, *pDevices);
-    if (inHwDev == NULL)
+    AudioHwDevice *inHwDev = findSuitableHwDev_l(module, *pDevices);
+    if (inHwDev == NULL) {
         return 0;
+    }
 
     audio_hw_device_t *inHwHal = inHwDev->hwDevice();
     audio_io_handle_t id = nextUniqueId();
 
-    status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
+    audio_stream_in_t *inStream = NULL;
+    status_t status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
                                         &inStream);
     ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, "
             "status %d",
@@ -1714,10 +1716,12 @@
     if (status == BAD_VALUE &&
         reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT &&
         (config.sample_rate <= 2 * reqSamplingRate) &&
-        (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) {
+        (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannelMask) <= FCC_2)) {
+        // FIXME describe the change proposed by HAL (save old values so we can log them here)
         ALOGV("openInput() reopening with proposed sampling rate and channel mask");
         inStream = NULL;
         status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream);
+        // FIXME log this new status; HAL should not propose any further changes
     }
 
     if (status == NO_ERROR && inStream != NULL) {
@@ -1776,10 +1780,10 @@
         // Start record thread
         // RecordThread requires both input and output device indication to forward to audio
         // pre processing modules
-        thread = new RecordThread(this,
+        RecordThread *thread = new RecordThread(this,
                                   input,
                                   reqSamplingRate,
-                                  reqChannels,
+                                  reqChannelMask,
                                   id,
                                   primaryOutputDevice_l(),
                                   *pDevices
@@ -1796,7 +1800,7 @@
             *pFormat = config.format;
         }
         if (pChannelMask != NULL) {
-            *pChannelMask = reqChannels;
+            *pChannelMask = reqChannelMask;
         }
 
         // notify client processes of the new input creation
@@ -1954,7 +1958,7 @@
             }
         }
         if (!found) {
-            Mutex::Autolock _l (t->mLock);
+            Mutex::Autolock _l(t->mLock);
             // remove all effects from the chain
             while (ec->mEffects.size()) {
                 sp<EffectModule> effect = ec->mEffects[0];
@@ -2249,9 +2253,7 @@
     }
 
 Exit:
-    if (status != NULL) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return handle;
 }
 
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 53e238e..9137040 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -110,7 +110,7 @@
                                 int *sessionId,
                                 String8& name,
                                 int clientUid,
-                                status_t *status);
+                                status_t *status /*non-NULL*/);
 
     virtual sp<IAudioRecord> openRecord(
                                 audio_io_handle_t input,
@@ -121,7 +121,7 @@
                                 IAudioFlinger::track_flags_t *flags,
                                 pid_t tid,
                                 int *sessionId,
-                                status_t *status);
+                                status_t *status /*non-NULL*/);
 
     virtual     uint32_t    sampleRate(audio_io_handle_t output) const;
     virtual     int         channelCount(audio_io_handle_t output) const;
@@ -210,7 +210,7 @@
                         int32_t priority,
                         audio_io_handle_t io,
                         int sessionId,
-                        status_t *status,
+                        status_t *status /*non-NULL*/,
                         int *id,
                         int *enabled);
 
@@ -499,7 +499,7 @@
     private:
         const char * const mModuleName;
         audio_hw_device_t * const mHwDevice;
-        Flags mFlags;
+        const Flags mFlags;
     };
 
     // AudioStreamOut and AudioStreamIn are immutable, so their fields are const.
@@ -509,7 +509,7 @@
     struct AudioStreamOut {
         AudioHwDevice* const audioHwDev;
         audio_stream_out_t* const stream;
-        audio_output_flags_t flags;
+        const audio_output_flags_t flags;
 
         audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
 
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index df4e029..8bea752 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -58,7 +58,7 @@
 status_t AudioMixer::DownmixerBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
         int64_t pts) {
     //ALOGV("DownmixerBufferProvider::getNextBuffer()");
-    if (this->mTrackBufferProvider != NULL) {
+    if (mTrackBufferProvider != NULL) {
         status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
         if (res == OK) {
             mDownmixConfig.inputCfg.buffer.frameCount = pBuffer->frameCount;
@@ -81,7 +81,7 @@
 
 void AudioMixer::DownmixerBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
     //ALOGV("DownmixerBufferProvider::releaseBuffer()");
-    if (this->mTrackBufferProvider != NULL) {
+    if (mTrackBufferProvider != NULL) {
         mTrackBufferProvider->releaseBuffer(pBuffer);
     } else {
         ALOGE("DownmixerBufferProvider::releaseBuffer() error: NULL track buffer provider");
@@ -90,9 +90,9 @@
 
 
 // ----------------------------------------------------------------------------
-bool AudioMixer::isMultichannelCapable = false;
+bool AudioMixer::sIsMultichannelCapable = false;
 
-effect_descriptor_t AudioMixer::dwnmFxDesc;
+effect_descriptor_t AudioMixer::sDwnmFxDesc;
 
 // Ensure mConfiguredNames bitmask is initialized properly on all architectures.
 // The value of 1 << x is undefined in C when x >= 32.
@@ -113,8 +113,6 @@
     // AudioMixer is not yet capable of multi-channel output beyond stereo
     ALOG_ASSERT(2 == MAX_NUM_CHANNELS, "bad MAX_NUM_CHANNELS %d", MAX_NUM_CHANNELS);
 
-    LocalClock lc;
-
     pthread_once(&sOnceControl, &sInitRoutine);
 
     mState.enabledTracks= 0;
@@ -136,27 +134,6 @@
         t++;
     }
 
-    // find multichannel downmix effect if we have to play multichannel content
-    uint32_t numEffects = 0;
-    int ret = EffectQueryNumberEffects(&numEffects);
-    if (ret != 0) {
-        ALOGE("AudioMixer() error %d querying number of effects", ret);
-        return;
-    }
-    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
-
-    for (uint32_t i = 0 ; i < numEffects ; i++) {
-        if (EffectQueryEffect(i, &dwnmFxDesc) == 0) {
-            ALOGV("effect %d is called %s", i, dwnmFxDesc.name);
-            if (memcmp(&dwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
-                ALOGI("found effect \"%s\" from %s",
-                        dwnmFxDesc.name, dwnmFxDesc.implementor);
-                isMultichannelCapable = true;
-                break;
-            }
-        }
-    }
-    ALOGE_IF(!isMultichannelCapable, "unable to find downmix effect");
 }
 
 AudioMixer::~AudioMixer()
@@ -229,7 +206,7 @@
 
 void AudioMixer::invalidateState(uint32_t mask)
 {
-    if (mask) {
+    if (mask != 0) {
         mState.needsChanged |= mask;
         mState.hook = process__validate;
     }
@@ -276,13 +253,13 @@
     DownmixerBufferProvider* pDbp = new DownmixerBufferProvider();
     int32_t status;
 
-    if (!isMultichannelCapable) {
+    if (!sIsMultichannelCapable) {
         ALOGE("prepareTrackForDownmix(%d) fails: mixer doesn't support multichannel content",
                 trackName);
         goto noDownmixForActiveTrack;
     }
 
-    if (EffectCreate(&dwnmFxDesc.uuid,
+    if (EffectCreate(&sDwnmFxDesc.uuid,
             pTrack->sessionId /*sessionId*/, -2 /*ioId not relevant here, using random value*/,
             &pDbp->mDownmixHandle/*pHandle*/) != 0) {
         ALOGE("prepareTrackForDownmix(%d) fails: error creating downmixer effect", trackName);
@@ -566,7 +543,7 @@
                 resampler = AudioResampler::create(
                         format,
                         // the resampler sees the number of channels after the downmixer, if any
-                        downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount,
+                        (int) (downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount),
                         devSampleRate, quality);
                 resampler->setLocalTimeFreq(sLocalTimeFreq);
             }
@@ -667,27 +644,29 @@
         countActiveTracks++;
         track_t& t = state->tracks[i];
         uint32_t n = 0;
+        // FIXME can overflow (mask is only 3 bits)
         n |= NEEDS_CHANNEL_1 + t.channelCount - 1;
-        n |= NEEDS_FORMAT_16;
-        n |= t.doesResample() ? NEEDS_RESAMPLE_ENABLED : NEEDS_RESAMPLE_DISABLED;
+        if (t.doesResample()) {
+            n |= NEEDS_RESAMPLE;
+        }
         if (t.auxLevel != 0 && t.auxBuffer != NULL) {
-            n |= NEEDS_AUX_ENABLED;
+            n |= NEEDS_AUX;
         }
 
         if (t.volumeInc[0]|t.volumeInc[1]) {
             volumeRamp = true;
         } else if (!t.doesResample() && t.volumeRL == 0) {
-            n |= NEEDS_MUTE_ENABLED;
+            n |= NEEDS_MUTE;
         }
         t.needs = n;
 
-        if ((n & NEEDS_MUTE__MASK) == NEEDS_MUTE_ENABLED) {
+        if (n & NEEDS_MUTE) {
             t.hook = track__nop;
         } else {
-            if ((n & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED) {
+            if (n & NEEDS_AUX) {
                 all16BitsStereoNoResample = false;
             }
-            if ((n & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
+            if (n & NEEDS_RESAMPLE) {
                 all16BitsStereoNoResample = false;
                 resampling = true;
                 t.hook = track__genericResample;
@@ -709,7 +688,7 @@
 
     // select the processing hooks
     state->hook = process__nop;
-    if (countActiveTracks) {
+    if (countActiveTracks > 0) {
         if (resampling) {
             if (!state->outputTemp) {
                 state->outputTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount];
@@ -745,16 +724,15 @@
 
     // Now that the volume ramp has been done, set optimal state and
     // track hooks for subsequent mixer process
-    if (countActiveTracks) {
+    if (countActiveTracks > 0) {
         bool allMuted = true;
         uint32_t en = state->enabledTracks;
         while (en) {
             const int i = 31 - __builtin_clz(en);
             en &= ~(1<<i);
             track_t& t = state->tracks[i];
-            if (!t.doesResample() && t.volumeRL == 0)
-            {
-                t.needs |= NEEDS_MUTE_ENABLED;
+            if (!t.doesResample() && t.volumeRL == 0) {
+                t.needs |= NEEDS_MUTE;
                 t.hook = track__nop;
             } else {
                 allMuted = false;
@@ -1124,8 +1102,9 @@
         t.in = t.buffer.raw;
         // t.in == NULL can happen if the track was flushed just after having
         // been enabled for mixing.
-        if (t.in == NULL)
+        if (t.in == NULL) {
             enabledTracks &= ~(1<<i);
+        }
     }
 
     e0 = enabledTracks;
@@ -1157,12 +1136,12 @@
                 track_t& t = state->tracks[i];
                 size_t outFrames = BLOCKSIZE;
                 int32_t *aux = NULL;
-                if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) {
+                if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
                     aux = t.auxBuffer + numFrames;
                 }
                 while (outFrames) {
                     size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
-                    if (inFrames) {
+                    if (inFrames > 0) {
                         t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames,
                                 state->resampleTemp, aux);
                         t.frameCount -= inFrames;
@@ -1238,14 +1217,14 @@
             e1 &= ~(1<<i);
             track_t& t = state->tracks[i];
             int32_t *aux = NULL;
-            if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) {
+            if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
                 aux = t.auxBuffer;
             }
 
             // this is a little goofy, on the resampling case we don't
             // acquire/release the buffers because it's done by
             // the resampler.
-            if ((t.needs & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
+            if (t.needs & NEEDS_RESAMPLE) {
                 t.resampler->setPTS(pts);
                 t.hook(&t, outTemp, numFrames, state->resampleTemp, aux);
             } else {
@@ -1445,8 +1424,9 @@
 int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
                                        int outputFrameIndex)
 {
-    if (AudioBufferProvider::kInvalidPTS == basePTS)
+    if (AudioBufferProvider::kInvalidPTS == basePTS) {
         return AudioBufferProvider::kInvalidPTS;
+    }
 
     return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate);
 }
@@ -1458,6 +1438,28 @@
 {
     LocalClock lc;
     sLocalTimeFreq = lc.getLocalFreq();
+
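+    // the downmix effect discovery below was moved here from the AudioMixer constructor;
+    // sInitRoutine runs under pthread_once, so the effect framework is queried once per process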
+    // find multichannel downmix effect if we have to play multichannel content
+    uint32_t numEffects = 0;
+    int ret = EffectQueryNumberEffects(&numEffects);
+    if (ret != 0) {
+        ALOGE("AudioMixer() error %d querying number of effects", ret);
+        return;
+    }
+    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
+
+    for (uint32_t i = 0 ; i < numEffects ; i++) {
+        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
+            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
+            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+                ALOGI("found effect \"%s\" from %s",
+                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
+                sIsMultichannelCapable = true;
+                break;
+            }
+        }
+    }
+    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
 }
 
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 43aeb86..d5c9da7 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -120,27 +120,19 @@
 private:
 
     enum {
+        // FIXME this representation permits up to 8 channels
         NEEDS_CHANNEL_COUNT__MASK   = 0x00000007,
-        NEEDS_FORMAT__MASK          = 0x000000F0,
-        NEEDS_MUTE__MASK            = 0x00000100,
-        NEEDS_RESAMPLE__MASK        = 0x00001000,
-        NEEDS_AUX__MASK             = 0x00010000,
     };
 
     enum {
-        NEEDS_CHANNEL_1             = 0x00000000,
-        NEEDS_CHANNEL_2             = 0x00000001,
+        NEEDS_CHANNEL_1             = 0x00000000,   // mono
+        NEEDS_CHANNEL_2             = 0x00000001,   // stereo
 
-        NEEDS_FORMAT_16             = 0x00000010,
+        // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
 
-        NEEDS_MUTE_DISABLED         = 0x00000000,
-        NEEDS_MUTE_ENABLED          = 0x00000100,
-
-        NEEDS_RESAMPLE_DISABLED     = 0x00000000,
-        NEEDS_RESAMPLE_ENABLED      = 0x00001000,
-
-        NEEDS_AUX_DISABLED     = 0x00000000,
-        NEEDS_AUX_ENABLED      = 0x00010000,
+        NEEDS_MUTE                  = 0x00000100,
+        NEEDS_RESAMPLE              = 0x00001000,
+        NEEDS_AUX                   = 0x00010000,
     };
 
     struct state_t;
@@ -256,9 +248,9 @@
     state_t         mState __attribute__((aligned(32)));
 
     // effect descriptor for the downmixer used by the mixer
-    static effect_descriptor_t dwnmFxDesc;
+    static effect_descriptor_t sDwnmFxDesc;
     // indicates whether a downmix effect has been found and is usable by this mixer
-    static bool                isMultichannelCapable;
+    static bool                sIsMultichannelCapable;
 
     // Call after changing either the enabled status of a track, or parameters of an enabled track.
     // OK to call more often than that, but unnecessary.
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 35e816b..c5ad2c0 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -77,24 +77,28 @@
     mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
     /* instantiate the audio policy manager */
     rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
-    if (rc)
+    if (rc) {
         return;
+    }
 
     rc = audio_policy_dev_open(module, &mpAudioPolicyDev);
     ALOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc));
-    if (rc)
+    if (rc) {
         return;
+    }
 
     rc = mpAudioPolicyDev->create_audio_policy(mpAudioPolicyDev, &aps_ops, this,
                                                &mpAudioPolicy);
     ALOGE_IF(rc, "couldn't create audio policy (%s)", strerror(-rc));
-    if (rc)
+    if (rc) {
         return;
+    }
 
     rc = mpAudioPolicy->init_check(mpAudioPolicy);
     ALOGE_IF(rc, "couldn't init_check the audio policy (%s)", strerror(-rc));
-    if (rc)
+    if (rc) {
         return;
+    }
 
     ALOGI("Loaded audio policy from %s (%s)", module->name, module->id);
 
@@ -126,10 +130,12 @@
     }
     mInputs.clear();
 
-    if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL)
+    if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) {
         mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy);
-    if (mpAudioPolicyDev != NULL)
+    }
+    if (mpAudioPolicyDev != NULL) {
         audio_policy_dev_close(mpAudioPolicyDev);
+    }
 }
 
 status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
@@ -1114,11 +1120,13 @@
 int AudioPolicyService::startTone(audio_policy_tone_t tone,
                                   audio_stream_type_t stream)
 {
-    if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION)
+    if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) {
         ALOGE("startTone: illegal tone requested (%d)", tone);
-    if (stream != AUDIO_STREAM_VOICE_CALL)
+    }
+    if (stream != AUDIO_STREAM_VOICE_CALL) {
         ALOGE("startTone: illegal stream (%d) requested for tone %d", stream,
             tone);
+    }
     mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
                                           AUDIO_STREAM_VOICE_CALL);
     return 0;
@@ -1509,8 +1517,9 @@
 static int aps_close_output(void *service, audio_io_handle_t output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
+    if (af == 0) {
         return PERMISSION_DENIED;
+    }
 
     return af->closeOutput(output);
 }
@@ -1573,8 +1582,9 @@
 static int aps_close_input(void *service, audio_io_handle_t input)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
+    if (af == 0) {
         return PERMISSION_DENIED;
+    }
 
     return af->closeInput(input);
 }
@@ -1583,8 +1593,9 @@
                                      audio_io_handle_t output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
+    if (af == 0) {
         return PERMISSION_DENIED;
+    }
 
     return af->setStreamOutput(stream, output);
 }
@@ -1594,8 +1605,9 @@
                                 audio_io_handle_t dst_output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
+    if (af == 0) {
         return PERMISSION_DENIED;
+    }
 
     return af->moveEffects(session, src_output, dst_output);
 }
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index 2c3c719..323f1a4 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -339,8 +339,9 @@
             out[outputIndex++] += vl * Interp(mX0L, in[0], phaseFraction);
             out[outputIndex++] += vr * Interp(mX0R, in[1], phaseFraction);
             Advance(&inputIndex, &phaseFraction, phaseIncrement);
-            if (outputIndex == outputSampleCount)
+            if (outputIndex == outputSampleCount) {
                 break;
+            }
         }
 
         // process input samples
@@ -434,8 +435,9 @@
             out[outputIndex++] += vl * sample;
             out[outputIndex++] += vr * sample;
             Advance(&inputIndex, &phaseFraction, phaseIncrement);
-            if (outputIndex == outputSampleCount)
+            if (outputIndex == outputSampleCount) {
                 break;
+            }
         }
 
         // process input samples
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index 18e59e9..1f9714b 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -66,8 +66,9 @@
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
         provider->getNextBuffer(&mBuffer, mPTS);
-        if (mBuffer.raw == NULL)
+        if (mBuffer.raw == NULL) {
             return;
+        }
         // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
     }
     int16_t *in = mBuffer.i16;
@@ -97,8 +98,9 @@
                 mBuffer.frameCount = inFrameCount;
                 provider->getNextBuffer(&mBuffer,
                                         calculateOutputPTS(outputIndex / 2));
-                if (mBuffer.raw == NULL)
+                if (mBuffer.raw == NULL) {
                     goto save_state;  // ugly, but efficient
+                }
                 in = mBuffer.i16;
                 // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
             }
@@ -132,8 +134,9 @@
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
         provider->getNextBuffer(&mBuffer, mPTS);
-        if (mBuffer.raw == NULL)
+        if (mBuffer.raw == NULL) {
             return;
+        }
         // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
     }
     int16_t *in = mBuffer.i16;
@@ -163,8 +166,9 @@
                 mBuffer.frameCount = inFrameCount;
                 provider->getNextBuffer(&mBuffer,
                                         calculateOutputPTS(outputIndex / 2));
-                if (mBuffer.raw == NULL)
+                if (mBuffer.raw == NULL) {
                     goto save_state;  // ugly, but efficient
+                }
                 // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
                 in = mBuffer.i16;
             }
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index a8a5169..bb98a35 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -116,8 +116,9 @@
             continue;
         }
         // first non destroyed handle is considered in control
-        if (controlHandle == NULL)
+        if (controlHandle == NULL) {
             controlHandle = h;
+        }
         if (h->priority() <= priority) {
             break;
         }
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index f27ea17..7126e92 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -459,8 +459,9 @@
             }
 
             int64_t pts;
-            if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts)))
+            if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts))) {
                 pts = AudioBufferProvider::kInvalidPTS;
+            }
 
             // process() is CPU-bound
             mixer->process(pts);
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 43b77f3..4b6c74d 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -34,6 +34,7 @@
                                 int uid,
                                 IAudioFlinger::track_flags_t flags);
     virtual             ~Track();
+    virtual status_t    initCheck() const;
 
     static  void        appendDumpHeader(String8& result);
             void        dump(char* buffer, size_t size);
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 57de568..5ef6f58 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -59,5 +59,4 @@
     // releaseBuffer() not overridden
 
     bool                mOverflow;  // overflow on most recent attempt to fill client buffer
-    AudioRecordServerProxy* mAudioRecordServerProxy;
 };
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index ba8195e..6987dbd 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -269,8 +269,8 @@
     :   Thread(false /*canCallJava*/),
         mType(type),
         mAudioFlinger(audioFlinger),
-        // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, and mFormat are
-        // set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters()
+        // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, mFormat, mBufferSize
+        // are set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters()
         mParamStatus(NO_ERROR),
         //FIXME: mStandby should be true here. Is this some kind of hack?
         mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
@@ -297,6 +297,17 @@
     }
 }
 
+status_t AudioFlinger::ThreadBase::readyToRun()
+{
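+    // common to playback and record threads; each subclass provides its own initCheck()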
+    status_t status = initCheck();
+    if (status == NO_ERROR) {
+        ALOGI("AudioFlinger's thread %p ready to run", this);
+    } else {
+        ALOGE("No working audio driver found.");
+    }
+    return status;
+}
+
 void AudioFlinger::ThreadBase::exit()
 {
     ALOGV("ThreadBase::exit");
@@ -369,7 +380,13 @@
 
 void AudioFlinger::ThreadBase::processConfigEvents()
 {
-    mLock.lock();
+    Mutex::Autolock _l(mLock);
+    processConfigEvents_l();
+}
+
+// post condition: mConfigEvents.isEmpty()
+void AudioFlinger::ThreadBase::processConfigEvents_l()
+{
     while (!mConfigEvents.isEmpty()) {
         ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size());
         ConfigEvent *event = mConfigEvents[0];
@@ -377,32 +394,31 @@
         // release mLock before locking AudioFlinger mLock: lock order is always
         // AudioFlinger then ThreadBase to avoid cross deadlock
         mLock.unlock();
-        switch(event->type()) {
-            case CFG_EVENT_PRIO: {
-                PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event);
-                // FIXME Need to understand why this has be done asynchronously
-                int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(),
-                        true /*asynchronous*/);
-                if (err != 0) {
-                    ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; "
-                          "error %d",
-                          prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err);
-                }
-            } break;
-            case CFG_EVENT_IO: {
-                IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event);
-                mAudioFlinger->mLock.lock();
+        switch (event->type()) {
+        case CFG_EVENT_PRIO: {
+            PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event);
+            // FIXME Need to understand why this has to be done asynchronously
+            int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(),
+                    true /*asynchronous*/);
+            if (err != 0) {
+                ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
+                      prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err);
+            }
+        } break;
+        case CFG_EVENT_IO: {
+            IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event);
+            {
+                Mutex::Autolock _l(mAudioFlinger->mLock);
                 audioConfigChanged_l(ioEvent->event(), ioEvent->param());
-                mAudioFlinger->mLock.unlock();
-            } break;
-            default:
-                ALOGE("processConfigEvents() unknown event type %d", event->type());
-                break;
+            }
+        } break;
+        default:
+            ALOGE("processConfigEvents() unknown event type %d", event->type());
+            break;
         }
         delete event;
         mLock.lock();
     }
-    mLock.unlock();
 }
 
 void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args)
@@ -427,6 +443,8 @@
     result.append(buffer);
     snprintf(buffer, SIZE, "HAL frame count: %d\n", mFrameCount);
     result.append(buffer);
+    snprintf(buffer, SIZE, "HAL buffer size: %u bytes\n", mBufferSize);
+    result.append(buffer);
     snprintf(buffer, SIZE, "Channel Count: %u\n", mChannelCount);
     result.append(buffer);
     snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask);
@@ -739,8 +757,7 @@
         int sessionId,
         effect_descriptor_t *desc,
         int *enabled,
-        status_t *status
-        )
+        status_t *status)
 {
     sp<EffectModule> effect;
     sp<EffectHandle> handle;
@@ -850,9 +867,7 @@
         handle.clear();
     }
 
-    if (status != NULL) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return handle;
 }
 
@@ -1002,7 +1017,7 @@
                                              type_t type)
     :   ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
         mNormalFrameCount(0), mMixBuffer(NULL),
-        mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
+        mSuspended(0), mBytesWritten(0),
         mActiveTracksGeneration(0),
         // mStreamTypes[] initialized in constructor body
         mOutput(output),
@@ -1060,7 +1075,7 @@
 AudioFlinger::PlaybackThread::~PlaybackThread()
 {
     mAudioFlinger->unregisterWriter(mNBLogWriter);
-    delete [] mAllocMixBuffer;
+    delete[] mMixBuffer;
 }
 
 void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
@@ -1150,16 +1165,6 @@
 }
 
 // Thread virtuals
-status_t AudioFlinger::PlaybackThread::readyToRun()
-{
-    status_t status = initCheck();
-    if (status == NO_ERROR) {
-        ALOGI("AudioFlinger's thread %p ready to run", this);
-    } else {
-        ALOGE("No working audio driver found.");
-    }
-    return status;
-}
 
 void AudioFlinger::PlaybackThread::onFirstRef()
 {
@@ -1326,8 +1331,12 @@
             track = TimedTrack::create(this, client, streamType, sampleRate, format,
                     channelMask, frameCount, sharedBuffer, sessionId, uid);
         }
-        if (track == 0 || track->getCblk() == NULL || track->name() < 0) {
-            lStatus = NO_MEMORY;
+
+        // new Track always returns non-NULL,
+        // but TimedTrack::create() is a factory that could fail by returning NULL
+        lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
+        if (lStatus != NO_ERROR) {
+            track.clear();
             goto Exit;
         }
 
@@ -1352,9 +1361,7 @@
     lStatus = NO_ERROR;
 
 Exit:
-    if (status) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return track;
 }
 
@@ -1642,7 +1649,8 @@
                 mFormat);
     }
     mFrameSize = audio_stream_frame_size(&mOutput->stream->common);
-    mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize;
+    mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
+    mFrameCount = mBufferSize / mFrameSize;
     if (mFrameCount & 15) {
         ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames",
                 mFrameCount);
@@ -1699,11 +1707,11 @@
     ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount,
             mNormalFrameCount);
 
-    delete[] mAllocMixBuffer;
-    size_t align = (mFrameSize < sizeof(int16_t)) ? sizeof(int16_t) : mFrameSize;
-    mAllocMixBuffer = new int8_t[mNormalFrameCount * mFrameSize + align - 1];
-    mMixBuffer = (int16_t *) ((((size_t)mAllocMixBuffer + align - 1) / align) * align);
-    memset(mMixBuffer, 0, mNormalFrameCount * mFrameSize);
+    delete[] mMixBuffer;
+    size_t normalBufferSize = mNormalFrameCount * mFrameSize;
+    // For historical reasons mMixBuffer is int16_t[], but mFrameSize can be odd (such as 1)
+    mMixBuffer = new int16_t[(normalBufferSize + 1) >> 1];
+    memset(mMixBuffer, 0, normalBufferSize);
 
     // force reconfiguration of effect chains and engines to take new buffer size and audio
     // parameters into account
@@ -1837,7 +1845,7 @@
         const Vector< sp<Track> >& tracksToRemove)
 {
     size_t count = tracksToRemove.size();
-    if (count) {
+    if (count > 0) {
         for (size_t i = 0 ; i < count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
             if (!track->isOutputTrack()) {
@@ -2405,7 +2413,7 @@
 void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove)
 {
     size_t count = tracksToRemove.size();
-    if (count) {
+    if (count > 0) {
         for (size_t i=0 ; i<count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
             mActiveTracks.remove(track);
@@ -2798,7 +2806,7 @@
             sleepTime = idleSleepTime;
         }
     } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) {
-        memset (mMixBuffer, 0, mixBufferSize);
+        memset(mMixBuffer, 0, mixBufferSize);
         sleepTime = 0;
         ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED),
                 "anticipated start");
@@ -3024,7 +3032,7 @@
             // +1 for rounding and +1 for additional sample needed for interpolation
             desiredFrames = (mNormalFrameCount * sr) / mSampleRate + 1 + 1;
             // add frames already consumed but not yet released by the resampler
-            // because cblk->framesReady() will include these frames
+            // because mAudioTrackServerProxy->framesReady() will include these frames
             desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
             // the minimum track buffer size is normally twice the number of frames necessary
             // to fill one buffer and the resampler should not leave more than one buffer worth
@@ -3362,6 +3370,7 @@
             if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) {
                 status = BAD_VALUE;
             } else {
+                // no need to save value, since it's constant
                 reconfig = true;
             }
         }
@@ -3369,6 +3378,7 @@
             if ((audio_channel_mask_t) value != AUDIO_CHANNEL_OUT_STEREO) {
                 status = BAD_VALUE;
             } else {
+                // no need to save value, since it's constant
                 reconfig = true;
             }
         }
@@ -4152,15 +4162,15 @@
 // must be called with thread mutex locked
 bool AudioFlinger::OffloadThread::shouldStandby_l()
 {
-    bool TrackPaused = false;
+    bool trackPaused = false;
 
     // do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack
     // after a timeout and we will enter standby then.
     if (mTracks.size() > 0) {
-        TrackPaused = mTracks[mTracks.size() - 1]->isPaused();
+        trackPaused = mTracks[mTracks.size() - 1]->isPaused();
     }
 
-    return !mStandby && !TrackPaused;
+    return !mStandby && !trackPaused;
 }
 
 
@@ -4368,7 +4378,9 @@
                                          ) :
     ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD),
     mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
-    // mRsmpInIndex and mBufferSize set by readInputParameters()
+    // mRsmpInFrames, mRsmpInFramesP2, mRsmpInUnrel, mRsmpInFront, and mRsmpInRear
+    //      are set by readInputParameters()
+    // mRsmpInIndex LEGACY
     mReqChannelCount(popcount(channelMask)),
     mReqSampleRate(sampleRate)
     // mBytesRead is only meaningful while active, and so is cleared in start()
@@ -4395,22 +4407,12 @@
     run(mName, PRIORITY_URGENT_AUDIO);
 }
 
-status_t AudioFlinger::RecordThread::readyToRun()
-{
-    status_t status = initCheck();
-    ALOGW_IF(status != NO_ERROR,"RecordThread %p could not initialize", this);
-    return status;
-}
-
 bool AudioFlinger::RecordThread::threadLoop()
 {
-    AudioBufferProvider::Buffer buffer;
-    sp<RecordTrack> activeTrack;
-    Vector< sp<EffectChain> > effectChains;
-
     nsecs_t lastWarning = 0;
 
     inputStandBy();
+    sp<RecordTrack> activeTrack;
     {
         Mutex::Autolock _l(mLock);
         activeTrack = mActiveTrack;
@@ -4420,27 +4422,38 @@
     // used to verify we've read at least once before evaluating how many bytes were read
     bool readOnce = false;
 
-    // start recording
-    while (!exitPending()) {
+    // used to request a deferred sleep, to be executed later while mutex is unlocked
+    bool doSleep = false;
 
-        processConfigEvents();
+    // start recording
+    for (;;) {
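+        // exitPending() is evaluated below while holding mLock, at the top of each iteration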
+        TrackBase::track_state activeTrackState;
+        Vector< sp<EffectChain> > effectChains;
+
+        // sleep with mutex unlocked
+        if (doSleep) {
+            doSleep = false;
+            usleep(kRecordThreadSleepUs);
+        }
 
         { // scope for mLock
             Mutex::Autolock _l(mLock);
-            checkForNewParameters_l();
+            if (exitPending()) {
+                break;
+            }
+            processConfigEvents_l();
+            // return value 'reconfig' is currently unused
+            bool reconfig = checkForNewParameters_l();
             if (mActiveTrack != 0 && activeTrack != mActiveTrack) {
                 SortedVector<int> tmp;
                 tmp.add(mActiveTrack->uid());
                 updateWakeLockUids_l(tmp);
             }
+            // make a stable copy of mActiveTrack
             activeTrack = mActiveTrack;
-            if (mActiveTrack == 0 && mConfigEvents.isEmpty()) {
-                standby();
-
-                if (exitPending()) {
-                    break;
-                }
-
+            if (activeTrack == 0) {
+                standbyIfNotAlreadyInStandby();
+                // exitPending() can't become true here
                 releaseWakeLock_l();
                 ALOGV("RecordThread: loop stopping");
                 // go to sleep
@@ -4449,176 +4462,238 @@
                 acquireWakeLock_l(mActiveTrack != 0 ? mActiveTrack->uid() : -1);
                 continue;
             }
-            if (mActiveTrack != 0) {
-                if (mActiveTrack->isTerminated()) {
-                    removeTrack_l(mActiveTrack);
-                    mActiveTrack.clear();
-                } else if (mActiveTrack->mState == TrackBase::PAUSING) {
-                    standby();
+
+            if (activeTrack->isTerminated()) {
+                removeTrack_l(activeTrack);
+                mActiveTrack.clear();
+                continue;
+            }
+
+            activeTrackState = activeTrack->mState;
+            switch (activeTrackState) {
+            case TrackBase::PAUSING:
+                standbyIfNotAlreadyInStandby();
+                mActiveTrack.clear();
+                mStartStopCond.broadcast();
+                doSleep = true;
+                continue;
+
+            case TrackBase::RESUMING:
+                mStandby = false;
+                if (mReqChannelCount != activeTrack->channelCount()) {
                     mActiveTrack.clear();
                     mStartStopCond.broadcast();
-                } else if (mActiveTrack->mState == TrackBase::RESUMING) {
-                    if (mReqChannelCount != mActiveTrack->channelCount()) {
-                        mActiveTrack.clear();
-                        mStartStopCond.broadcast();
-                    } else if (readOnce) {
-                        // record start succeeds only if first read from audio input
-                        // succeeds
-                        if (mBytesRead >= 0) {
-                            mActiveTrack->mState = TrackBase::ACTIVE;
-                        } else {
-                            mActiveTrack.clear();
-                        }
-                        mStartStopCond.broadcast();
-                    }
-                    mStandby = false;
+                    continue;
                 }
+                if (readOnce) {
+                    mStartStopCond.broadcast();
+                    // record start succeeds only if first read from audio input succeeds
+                    if (mBytesRead < 0) {
+                        mActiveTrack.clear();
+                        continue;
+                    }
+                    activeTrack->mState = TrackBase::ACTIVE;
+                }
+                break;
+
+            case TrackBase::ACTIVE:
+                break;
+
+            case TrackBase::IDLE:
+                doSleep = true;
+                continue;
+
+            default:
+                LOG_FATAL("Unexpected activeTrackState %d", activeTrackState);
             }
 
             lockEffectChains_l(effectChains);
         }
 
-        if (mActiveTrack != 0) {
-            if (mActiveTrack->mState != TrackBase::ACTIVE &&
-                mActiveTrack->mState != TrackBase::RESUMING) {
-                unlockEffectChains(effectChains);
-                usleep(kRecordThreadSleepUs);
-                continue;
-            }
-            for (size_t i = 0; i < effectChains.size(); i ++) {
-                effectChains[i]->process_l();
-            }
+        // the thread mutex is now unlocked; mActiveTrack may change, but activeTrack is a kept
+        // local strong reference and does not change
+        // activeTrack->mState may change, but activeTrackState is immutable: ACTIVE or RESUMING
 
-            buffer.frameCount = mFrameCount;
-            status_t status = mActiveTrack->getNextBuffer(&buffer);
-            if (status == NO_ERROR) {
-                readOnce = true;
-                size_t framesOut = buffer.frameCount;
-                if (mResampler == NULL) {
-                    // no resampling
-                    while (framesOut) {
-                        size_t framesIn = mFrameCount - mRsmpInIndex;
-                        if (framesIn) {
-                            int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize;
-                            int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) *
-                                    mActiveTrack->mFrameSize;
-                            if (framesIn > framesOut)
-                                framesIn = framesOut;
-                            mRsmpInIndex += framesIn;
-                            framesOut -= framesIn;
-                            if (mChannelCount == mReqChannelCount) {
-                                memcpy(dst, src, framesIn * mFrameSize);
-                            } else {
-                                if (mChannelCount == 1) {
-                                    upmix_to_stereo_i16_from_mono_i16((int16_t *)dst,
-                                            (int16_t *)src, framesIn);
-                                } else {
-                                    downmix_to_mono_i16_from_stereo_i16((int16_t *)dst,
-                                            (int16_t *)src, framesIn);
-                                }
-                            }
-                        }
-                        if (framesOut && mFrameCount == mRsmpInIndex) {
-                            void *readInto;
-                            if (framesOut == mFrameCount && mChannelCount == mReqChannelCount) {
-                                readInto = buffer.raw;
-                                framesOut = 0;
-                            } else {
-                                readInto = mRsmpInBuffer;
-                                mRsmpInIndex = 0;
-                            }
-                            mBytesRead = mInput->stream->read(mInput->stream, readInto,
-                                    mBufferSize);
-                            if (mBytesRead <= 0) {
-                                if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE))
-                                {
-                                    ALOGE("Error reading audio input");
-                                    // Force input into standby so that it tries to
-                                    // recover at next read attempt
-                                    inputStandBy();
-                                    usleep(kRecordThreadSleepUs);
-                                }
-                                mRsmpInIndex = mFrameCount;
-                                framesOut = 0;
-                                buffer.frameCount = 0;
-                            }
-#ifdef TEE_SINK
-                            else if (mTeeSink != 0) {
-                                (void) mTeeSink->write(readInto,
-                                        mBytesRead >> Format_frameBitShift(mTeeSink->format()));
-                            }
-#endif
-                        }
-                    }
-                } else {
-                    // resampling
-
-                    // resampler accumulates, but we only have one source track
-                    memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t));
-                    // alter output frame count as if we were expecting stereo samples
-                    if (mChannelCount == 1 && mReqChannelCount == 1) {
-                        framesOut >>= 1;
-                    }
-                    mResampler->resample(mRsmpOutBuffer, framesOut,
-                            this /* AudioBufferProvider* */);
-                    // ditherAndClamp() works as long as all buffers returned by
-                    // mActiveTrack->getNextBuffer() are 32 bit aligned which should be always true.
-                    if (mChannelCount == 2 && mReqChannelCount == 1) {
-                        // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t
-                        ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
-                        // the resampler always outputs stereo samples:
-                        // do post stereo to mono conversion
-                        downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer,
-                                framesOut);
-                    } else {
-                        ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
-                    }
-                    // now done with mRsmpOutBuffer
-
-                }
-                if (mFramestoDrop == 0) {
-                    mActiveTrack->releaseBuffer(&buffer);
-                } else {
-                    if (mFramestoDrop > 0) {
-                        mFramestoDrop -= buffer.frameCount;
-                        if (mFramestoDrop <= 0) {
-                            clearSyncStartEvent();
-                        }
-                    } else {
-                        mFramestoDrop += buffer.frameCount;
-                        if (mFramestoDrop >= 0 || mSyncStartEvent == 0 ||
-                                mSyncStartEvent->isCancelled()) {
-                            ALOGW("Synced record %s, session %d, trigger session %d",
-                                  (mFramestoDrop >= 0) ? "timed out" : "cancelled",
-                                  mActiveTrack->sessionId(),
-                                  (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0);
-                            clearSyncStartEvent();
-                        }
-                    }
-                }
-                mActiveTrack->clearOverflow();
-            }
-            // client isn't retrieving buffers fast enough
-            else {
-                if (!mActiveTrack->setOverflow()) {
-                    nsecs_t now = systemTime();
-                    if ((now - lastWarning) > kWarningThrottleNs) {
-                        ALOGW("RecordThread: buffer overflow");
-                        lastWarning = now;
-                    }
-                }
-                // Release the processor for a while before asking for a new buffer.
-                // This will give the application more chance to read from the buffer and
-                // clear the overflow.
-                usleep(kRecordThreadSleepUs);
-            }
+        for (size_t i = 0; i < effectChains.size(); i ++) {
+            // thread mutex is not locked, but effect chain is locked
+            effectChains[i]->process_l();
         }
+
+        AudioBufferProvider::Buffer buffer;
+        buffer.frameCount = mFrameCount;
+        status_t status = activeTrack->getNextBuffer(&buffer);
+        if (status == NO_ERROR) {
+            readOnce = true;
+            size_t framesOut = buffer.frameCount;
+            if (mResampler == NULL) {
+                // no resampling
+                while (framesOut) {
+                    size_t framesIn = mFrameCount - mRsmpInIndex;
+                    if (framesIn > 0) {
+                        int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize;
+                        int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) *
+                                activeTrack->mFrameSize;
+                        if (framesIn > framesOut) {
+                            framesIn = framesOut;
+                        }
+                        mRsmpInIndex += framesIn;
+                        framesOut -= framesIn;
+                        if (mChannelCount == mReqChannelCount) {
+                            memcpy(dst, src, framesIn * mFrameSize);
+                        } else {
+                            if (mChannelCount == 1) {
+                                upmix_to_stereo_i16_from_mono_i16((int16_t *)dst,
+                                        (int16_t *)src, framesIn);
+                            } else {
+                                downmix_to_mono_i16_from_stereo_i16((int16_t *)dst,
+                                        (int16_t *)src, framesIn);
+                            }
+                        }
+                    }
+                    if (framesOut > 0 && mFrameCount == mRsmpInIndex) {
+                        void *readInto;
+                        if (framesOut == mFrameCount && mChannelCount == mReqChannelCount) {
+                            readInto = buffer.raw;
+                            framesOut = 0;
+                        } else {
+                            readInto = mRsmpInBuffer;
+                            mRsmpInIndex = 0;
+                        }
+                        mBytesRead = mInput->stream->read(mInput->stream, readInto,
+                                mBufferSize);
+                        if (mBytesRead <= 0) {
+                            // TODO: verify that it's benign to use a stale track state
+                            if ((mBytesRead < 0) && (activeTrackState == TrackBase::ACTIVE)) {
+                                ALOGE("Error reading audio input");
+                                // Force input into standby so that it tries to
+                                // recover at next read attempt
+                                inputStandBy();
+                                doSleep = true;
+                            }
+                            mRsmpInIndex = mFrameCount;
+                            framesOut = 0;
+                            buffer.frameCount = 0;
+                        }
+#ifdef TEE_SINK
+                        else if (mTeeSink != 0) {
+                            (void) mTeeSink->write(readInto,
+                                    mBytesRead >> Format_frameBitShift(mTeeSink->format()));
+                        }
+#endif
+                    }
+                }
+            } else {
+                // resampling
+
+                // avoid busy-waiting if client doesn't keep up
+                bool madeProgress = false;
+
+                // keep mRsmpInBuffer full so resampler always has sufficient input
+                for (;;) {
+                    int32_t rear = mRsmpInRear;
+                    ssize_t filled = rear - mRsmpInFront;
+                    ALOG_ASSERT(0 <= filled && (size_t) filled <= mRsmpInFramesP2);
+                    // exit once there is enough data in buffer for resampler
+                    if ((size_t) filled >= mRsmpInFrames) {
+                        break;
+                    }
+                    size_t avail = mRsmpInFramesP2 - filled;
+                    // Only try to read full HAL buffers.
+                    // But if the HAL read returns a partial buffer, use it.
+                    if (avail < mFrameCount) {
+                        ALOGE("insufficient space to read: avail %d < mFrameCount %d",
+                                avail, mFrameCount);
+                        break;
+                    }
+                    // If 'avail' is non-contiguous, first read past the nominal end of buffer, then
+                    // copy to the right place.  Permitted because mRsmpInBuffer was over-allocated.
+                    rear &= mRsmpInFramesP2 - 1;
+                    mBytesRead = mInput->stream->read(mInput->stream,
+                            &mRsmpInBuffer[rear * mChannelCount], mBufferSize);
+                    if (mBytesRead <= 0) {
+                        ALOGE("read failed: mBytesRead=%d < %u", mBytesRead, mBufferSize);
+                        break;
+                    }
+                    ALOG_ASSERT((size_t) mBytesRead <= mBufferSize);
+                    size_t framesRead = mBytesRead / mFrameSize;
+                    ALOG_ASSERT(framesRead > 0);
+                    madeProgress = true;
+                    // If 'avail' was non-contiguous, we now correct for reading past end of buffer.
+                    size_t part1 = mRsmpInFramesP2 - rear;
+                    if (framesRead > part1) {
+                        memcpy(mRsmpInBuffer, &mRsmpInBuffer[mRsmpInFramesP2 * mChannelCount],
+                                (framesRead - part1) * mFrameSize);
+                    }
+                    mRsmpInRear += framesRead;
+                }
+
+                if (!madeProgress) {
+                    ALOGV("Did not make progress");
+                    usleep(((mFrameCount * 1000) / mSampleRate) * 1000);
+                }
+
+                // resampler accumulates, but we only have one source track
+                memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t));
+                mResampler->resample(mRsmpOutBuffer, framesOut,
+                        this /* AudioBufferProvider* */);
+                // ditherAndClamp() works as long as all buffers returned by
+                // activeTrack->getNextBuffer() are 32-bit aligned, which should always be true.
+                if (mReqChannelCount == 1) {
+                    // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t
+                    ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
+                    // the resampler always outputs stereo samples:
+                    // do post stereo to mono conversion
+                    downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer,
+                            framesOut);
+                } else {
+                    ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
+                }
+                // now done with mRsmpOutBuffer
+
+            }
+            if (mFramestoDrop == 0) {
+                activeTrack->releaseBuffer(&buffer);
+            } else {
+                if (mFramestoDrop > 0) {
+                    mFramestoDrop -= buffer.frameCount;
+                    if (mFramestoDrop <= 0) {
+                        clearSyncStartEvent();
+                    }
+                } else {
+                    mFramestoDrop += buffer.frameCount;
+                    if (mFramestoDrop >= 0 || mSyncStartEvent == 0 ||
+                            mSyncStartEvent->isCancelled()) {
+                        ALOGW("Synced record %s, session %d, trigger session %d",
+                              (mFramestoDrop >= 0) ? "timed out" : "cancelled",
+                              activeTrack->sessionId(),
+                              (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0);
+                        clearSyncStartEvent();
+                    }
+                }
+            }
+            activeTrack->clearOverflow();
+        }
+        // client isn't retrieving buffers fast enough
+        else {
+            if (!activeTrack->setOverflow()) {
+                nsecs_t now = systemTime();
+                if ((now - lastWarning) > kWarningThrottleNs) {
+                    ALOGW("RecordThread: buffer overflow");
+                    lastWarning = now;
+                }
+            }
+            // Release the processor for a while before asking for a new buffer.
+            // This will give the application more chance to read from the buffer and
+            // clear the overflow.
+            doSleep = true;
+        }
+
         // enable changes in effect chain
         unlockEffectChains(effectChains);
-        effectChains.clear();
+        // effectChains doesn't need to be cleared; its destructor does that at scope end
     }
 
-    standby();
+    standbyIfNotAlreadyInStandby();
 
     {
         Mutex::Autolock _l(mLock);
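
For readers following the new resampler input path above: mRsmpInFront and mRsmpInRear are rolling counters that are never reset, the fill level is simply their difference, and array indices come from masking with the power-of-two capacity. Below is a minimal, self-contained sketch of the producer side of that scheme (copy a contiguous block at the rear index, let it run past the nominal end of the over-allocated buffer, then move the overrun back to the start). The names are hypothetical; this is not AudioFlinger code, only an illustration of the technique.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Producer side of the pipe: copy 'frames' frames of interleaved int16_t
    // samples into a ring buffer whose capacity 'framesP2' is a power of two
    // and which is over-allocated by (maxRead - 1) frames, so that the first
    // memcpy may run past the nominal end before being fixed up.
    static void produceFrames(std::vector<int16_t>& buf, size_t framesP2, size_t channels,
                              int32_t& rear, const int16_t* src, size_t frames)
    {
        // caller guarantees frames <= free space and frames <= maxRead
        size_t rearIdx = (size_t) rear & (framesP2 - 1);    // wrap via power-of-two mask
        std::memcpy(&buf[rearIdx * channels], src, frames * channels * sizeof(int16_t));
        size_t part1 = framesP2 - rearIdx;                  // room before the nominal end
        if (frames > part1) {
            // the tail ran past the nominal end; move it back to the start
            std::memcpy(&buf[0], &buf[framesP2 * channels],
                    (frames - part1) * channels * sizeof(int16_t));
        }
        rear += frames;                                     // rolling counter, never cleared
    }
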
@@ -4636,7 +4711,7 @@
     return false;
 }
 
-void AudioFlinger::RecordThread::standby()
+void AudioFlinger::RecordThread::standbyIfNotAlreadyInStandby()
 {
     if (!mStandby) {
         inputStandBy();
@@ -4649,7 +4724,7 @@
     mInput->stream->common.standby(&mInput->stream->common);
 }
 
-sp<AudioFlinger::RecordThread::RecordTrack>  AudioFlinger::RecordThread::createRecordTrack_l(
+sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
         const sp<AudioFlinger::Client>& client,
         uint32_t sampleRate,
         audio_format_t format,
@@ -4728,9 +4803,9 @@
         track = new RecordTrack(this, client, sampleRate,
                       format, channelMask, frameCount, sessionId, uid);
 
-        if (track->getCblk() == 0) {
-            ALOGE("createRecordTrack_l() no control block");
-            lStatus = NO_MEMORY;
+        lStatus = track->initCheck();
+        if (lStatus != NO_ERROR) {
+            ALOGE("createRecordTrack_l() initCheck failed %d; no control block?", lStatus);
             track.clear();
             goto Exit;
         }
@@ -4752,9 +4827,7 @@
     lStatus = NO_ERROR;
 
 Exit:
-    if (status) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return track;
 }
 
@@ -4785,6 +4858,7 @@
     }
 
     {
+        // This is a rendezvous between the binder thread calling start() and the RecordThread
         AutoMutex lock(mLock);
         if (mActiveTrack != 0) {
             if (recordTrack != mActiveTrack.get()) {
@@ -4795,21 +4869,29 @@
             return status;
         }
 
+        // FIXME why? already set in constructor, 'STARTING_1' would be more accurate
         recordTrack->mState = TrackBase::IDLE;
         mActiveTrack = recordTrack;
         mLock.unlock();
         status_t status = AudioSystem::startInput(mId);
         mLock.lock();
+        // FIXME should verify that mActiveTrack is still == recordTrack
         if (status != NO_ERROR) {
             mActiveTrack.clear();
             clearSyncStartEvent();
             return status;
         }
+        // FIXME LEGACY
         mRsmpInIndex = mFrameCount;
+        mRsmpInFront = 0;
+        mRsmpInRear = 0;
+        mRsmpInUnrel = 0;
         mBytesRead = 0;
         if (mResampler != NULL) {
             mResampler->reset();
         }
+        // FIXME hijacking a playback track state name which was intended for start after pause;
+        //       here 'STARTING_2' would be more accurate
         mActiveTrack->mState = TrackBase::RESUMING;
         // signal thread to start
         ALOGV("Signal record thread");
@@ -4820,6 +4902,7 @@
             status = INVALID_OPERATION;
             goto startError;
         }
+        // FIXME incorrect usage of wait: no explicit predicate or loop
         mStartStopCond.wait(mLock);
         if (mActiveTrack == 0) {
             ALOGV("Record failed to start");
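
The FIXME comments about wait usage (here and in stop() below) refer to the standard discipline of waiting on a condition variable inside a loop that re-checks an explicit predicate. The sketch below shows the shape of that pattern using the C++ standard library rather than the android Condition/Mutex classes; it is only an illustration of the discipline the FIXMEs ask for, not the actual fix.

    #include <condition_variable>
    #include <mutex>

    struct StartRendezvous {
        std::mutex              lock;
        std::condition_variable cond;
        bool                    started = false;    // the explicit predicate

        void signalStarted() {
            std::lock_guard<std::mutex> l(lock);
            started = true;
            cond.notify_all();
        }

        // Robust against spurious wakeups: the predicate is re-checked every
        // time the wait returns, so the caller cannot proceed prematurely.
        void waitForStart() {
            std::unique_lock<std::mutex> l(lock);
            cond.wait(l, [this] { return started; });
        }
    };
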
@@ -4870,11 +4953,13 @@
     if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) {
         return false;
     }
+    // note that threadLoop may still be processing the track at this point [without lock]
     recordTrack->mState = TrackBase::PAUSING;
     // do not wait for mStartStopCond if exiting
     if (exitPending()) {
         return true;
     }
+    // FIXME incorrect usage of wait: no explicit predicate or loop
     mStartStopCond.wait(mLock);
     // if we have been restarted, recordTrack == mActiveTrack.get() here
     if (exitPending() || recordTrack != mActiveTrack.get()) {
@@ -4998,46 +5083,47 @@
 // AudioBufferProvider interface
 status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
-    size_t framesReq = buffer->frameCount;
-    size_t framesReady = mFrameCount - mRsmpInIndex;
-    int channelCount;
-
-    if (framesReady == 0) {
-        mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mBufferSize);
-        if (mBytesRead <= 0) {
-            if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) {
-                ALOGE("RecordThread::getNextBuffer() Error reading audio input");
-                // Force input into standby so that it tries to
-                // recover at next read attempt
-                inputStandBy();
-                usleep(kRecordThreadSleepUs);
-            }
-            buffer->raw = NULL;
-            buffer->frameCount = 0;
-            return NOT_ENOUGH_DATA;
-        }
-        mRsmpInIndex = 0;
-        framesReady = mFrameCount;
+    int32_t rear = mRsmpInRear;
+    int32_t front = mRsmpInFront;
+    ssize_t filled = rear - front;
+    ALOG_ASSERT(0 <= filled && (size_t) filled <= mRsmpInFramesP2);
+    // 'filled' may be non-contiguous, so return only the first contiguous chunk
+    front &= mRsmpInFramesP2 - 1;
+    size_t part1 = mRsmpInFramesP2 - front;
+    if (part1 > (size_t) filled) {
+        part1 = filled;
+    }
+    size_t ask = buffer->frameCount;
+    ALOG_ASSERT(ask > 0);
+    if (part1 > ask) {
+        part1 = ask;
+    }
+    if (part1 == 0) {
+        // The higher level should keep mRsmpInBuffer full, and not call the resampler if empty
+        ALOGE("RecordThread::getNextBuffer() starved");
+        buffer->raw = NULL;
+        buffer->frameCount = 0;
+        mRsmpInUnrel = 0;
+        return NOT_ENOUGH_DATA;
     }
 
-    if (framesReq > framesReady) {
-        framesReq = framesReady;
-    }
-
-    if (mChannelCount == 1 && mReqChannelCount == 2) {
-        channelCount = 1;
-    } else {
-        channelCount = 2;
-    }
-    buffer->raw = mRsmpInBuffer + mRsmpInIndex * channelCount;
-    buffer->frameCount = framesReq;
+    buffer->raw = mRsmpInBuffer + front * mChannelCount;
+    buffer->frameCount = part1;
+    mRsmpInUnrel = part1;
     return NO_ERROR;
 }
 
 // AudioBufferProvider interface
 void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer)
 {
-    mRsmpInIndex += buffer->frameCount;
+    size_t stepCount = buffer->frameCount;
+    if (stepCount == 0) {
+        return;
+    }
+    ALOG_ASSERT(stepCount <= mRsmpInUnrel);
+    mRsmpInUnrel -= stepCount;
+    mRsmpInFront += stepCount;
+    buffer->raw = NULL;
     buffer->frameCount = 0;
 }
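
As a companion to the producer sketch earlier, this is a minimal illustration of what the rewritten getNextBuffer()/releaseBuffer() pair does: hand out only the first contiguous run starting at the front index, stopping at either the wrap point or the requested count, and advance the rolling front counter on release. Again, the names are hypothetical and this is not the AudioFlinger implementation.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    struct Chunk {
        const int16_t* data;    // first contiguous run, or nullptr if starved
        size_t         frames;
    };

    // Consumer side: return at most 'ask' frames, bounded by both the wrap
    // point of the power-of-two ring buffer and the producer's rear counter.
    static Chunk peekContiguous(const int16_t* buf, size_t framesP2, size_t channels,
                                int32_t front, int32_t rear, size_t ask)
    {
        size_t filled = (size_t) (uint32_t) (rear - front);     // rolling counters
        size_t frontIdx = (size_t) front & (framesP2 - 1);
        size_t part1 = std::min(framesP2 - frontIdx, filled);   // stop at the wrap point
        part1 = std::min(part1, ask);
        if (part1 == 0) {
            return Chunk{nullptr, 0};                           // starved; refill first
        }
        return Chunk{buf + frontIdx * channels, part1};
    }

    // Releasing 'n' consumed frames is then just:  front += n;
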
 
@@ -5052,7 +5138,7 @@
         int value;
         audio_format_t reqFormat = mFormat;
         uint32_t reqSamplingRate = mReqSampleRate;
-        uint32_t reqChannelCount = mReqChannelCount;
+        audio_channel_mask_t reqChannelMask = audio_channel_in_mask_from_count(mReqChannelCount);
 
         if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
             reqSamplingRate = value;
@@ -5067,8 +5153,13 @@
             }
         }
         if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
-            reqChannelCount = popcount(value);
-            reconfig = true;
+            audio_channel_mask_t mask = (audio_channel_mask_t) value;
+            if (mask != AUDIO_CHANNEL_IN_MONO && mask != AUDIO_CHANNEL_IN_STEREO) {
+                status = BAD_VALUE;
+            } else {
+                reqChannelMask = mask;
+                reconfig = true;
+            }
         }
         if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
             // do not accept frame count changes if tracks are open as the track buffer
@@ -5117,6 +5208,7 @@
             }
             mAudioSource = (audio_source_t)value;
         }
+
         if (status == NO_ERROR) {
             status = mInput->stream->common.set_parameters(&mInput->stream->common,
                     keyValuePair.string());
@@ -5133,7 +5225,8 @@
                             <= (2 * reqSamplingRate)) &&
                     popcount(mInput->stream->common.get_channels(&mInput->stream->common))
                             <= FCC_2 &&
-                    (reqChannelCount <= FCC_2)) {
+                    (reqChannelMask == AUDIO_CHANNEL_IN_MONO ||
+                            reqChannelMask == AUDIO_CHANNEL_IN_STEREO)) {
                     status = NO_ERROR;
                 }
                 if (status == NO_ERROR) {
@@ -5169,7 +5262,7 @@
 
 void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) {
     AudioSystem::OutputDescriptor desc;
-    void *param2 = NULL;
+    const void *param2 = NULL;
 
     switch (event) {
     case AudioSystem::INPUT_OPENED:
@@ -5208,29 +5301,22 @@
     mFrameSize = audio_stream_frame_size(&mInput->stream->common);
     mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common);
     mFrameCount = mBufferSize / mFrameSize;
-    mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount];
+    // With 3 HAL buffers, we can guarantee the ability to down-sample the input by a ratio of
+    // 2:1 into one full output buffer, regardless of the alignment of the available input.
+    mRsmpInFrames = mFrameCount * 3;
+    mRsmpInFramesP2 = roundup(mRsmpInFrames);
+    // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
+    mRsmpInBuffer = new int16_t[(mRsmpInFramesP2 + mFrameCount - 1) * mChannelCount];
+    mRsmpInFront = 0;
+    mRsmpInRear = 0;
+    mRsmpInUnrel = 0;
 
-    if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2)
-    {
-        int channelCount;
-        // optimization: if mono to mono, use the resampler in stereo to stereo mode to avoid
-        // stereo to mono post process as the resampler always outputs stereo.
-        if (mChannelCount == 1 && mReqChannelCount == 2) {
-            channelCount = 1;
-        } else {
-            channelCount = 2;
-        }
-        mResampler = AudioResampler::create(16, channelCount, mReqSampleRate);
+    if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2) {
+        mResampler = AudioResampler::create(16, (int) mChannelCount, mReqSampleRate);
         mResampler->setSampleRate(mSampleRate);
         mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN);
+        // resampler always outputs stereo
         mRsmpOutBuffer = new int32_t[mFrameCount * FCC_2];
-
-        // optmization: if mono to mono, alter input frame count as if we were inputing
-        // stereo samples
-        if (mChannelCount == 1 && mReqChannelCount == 1) {
-            mFrameCount >>= 1;
-        }
-
     }
     mRsmpInIndex = mFrameCount;
 }
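
The sizing in readInputParameters() is easier to see with numbers. As a hedged sketch, the roundup() call used above is assumed to behave like the classic round-up-to-power-of-two bit trick below, and the worked figures in the comment assume a hypothetical 1024-frame stereo HAL buffer.

    #include <cstddef>

    // Assumed equivalent of the roundup() helper used above (valid for v >= 1
    // that fits in 32 bits); not necessarily the exact implementation.
    static size_t roundUpToPowerOf2(size_t v)
    {
        v--;
        v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
        v |= v >> 8;  v |= v >> 16;
        return v + 1;
    }

    // Example with a hypothetical 1024-frame stereo HAL buffer:
    //   rsmpInFrames   = 1024 * 3 = 3072          three HAL buffers of margin
    //   rsmpInFramesP2 = 4096                     power of two, so indices can be masked
    //   allocation     = (4096 + 1024 - 1) * 2    int16_t samples, over-allocated so one
    //                                             HAL read may run past the nominal end
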
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index a0b53cb..43e335d 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -36,6 +36,8 @@
                 audio_devices_t outDevice, audio_devices_t inDevice, type_t type);
     virtual             ~ThreadBase();
 
+    virtual status_t    readyToRun();
+
     void dumpBase(int fd, const Vector<String16>& args);
     void dumpEffectChains(int fd, const Vector<String16>& args);
 
@@ -141,6 +143,7 @@
                 void        sendIoConfigEvent_l(int event, int param = 0);
                 void        sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
                 void        processConfigEvents();
+                void        processConfigEvents_l();
 
                 // see note at declaration of mStandby, mOutDevice and mInDevice
                 bool        standby() const { return mStandby; }
@@ -156,7 +159,7 @@
                                     int sessionId,
                                     effect_descriptor_t *desc,
                                     int *enabled,
-                                    status_t *status);
+                                    status_t *status /*non-NULL*/);
                 void disconnectEffect(const sp< EffectModule>& effect,
                                       EffectHandle *handle,
                                       bool unpinIfLast);
@@ -275,6 +278,7 @@
                 uint32_t                mChannelCount;
                 size_t                  mFrameSize;
                 audio_format_t          mFormat;
+                size_t                  mBufferSize;       // HAL buffer size for read() or write()
 
                 // Parameter sequence by client: binder thread calling setParameters():
                 //  1. Lock mLock
@@ -358,7 +362,6 @@
                 void        dump(int fd, const Vector<String16>& args);
 
     // Thread virtuals
-    virtual     status_t    readyToRun();
     virtual     bool        threadLoop();
 
     // RefBase
@@ -425,7 +428,7 @@
                                 IAudioFlinger::track_flags_t *flags,
                                 pid_t tid,
                                 int uid,
-                                status_t *status);
+                                status_t *status /*non-NULL*/);
 
                 AudioStreamOut* getOutput() const;
                 AudioStreamOut* clearOutput();
@@ -479,7 +482,6 @@
     size_t                          mNormalFrameCount;  // normal mixer and effects
 
     int16_t*                        mMixBuffer;         // frame size aligned mix buffer
-    int8_t*                         mAllocMixBuffer;    // mixer buffer allocation address
 
     // suspend count, > 0 means suspended.  While suspended, the thread continues to pull from
     // tracks and mix, but doesn't write to HAL.  A2DP and SCO HAL implementations can't handle
@@ -867,12 +869,12 @@
 
     // Thread virtuals
     virtual bool        threadLoop();
-    virtual status_t    readyToRun();
 
     // RefBase
     virtual void        onFirstRef();
 
     virtual status_t    initCheck() const { return (mInput == NULL) ? NO_INIT : NO_ERROR; }
+
             sp<AudioFlinger::RecordThread::RecordTrack>  createRecordTrack_l(
                     const sp<AudioFlinger::Client>& client,
                     uint32_t sampleRate,
@@ -883,7 +885,7 @@
                     int uid,
                     IAudioFlinger::track_flags_t *flags,
                     pid_t tid,
-                    status_t *status);
+                    status_t *status /*non-NULL*/);
 
             status_t    start(RecordTrack* recordTrack,
                               AudioSystem::sync_event_t event,
@@ -926,13 +928,13 @@
             bool        hasFastRecorder() const { return false; }
 
 private:
-            void clearSyncStartEvent();
+            void    clearSyncStartEvent();
 
             // Enter standby if not already in standby, and set mStandby flag
-            void standby();
+            void    standbyIfNotAlreadyInStandby();
 
             // Call the HAL standby method unconditionally, and don't change mStandby flag
-            void inputStandBy();
+            void    inputStandBy();
 
             AudioStreamIn                       *mInput;
             SortedVector < sp<RecordTrack> >    mTracks;
@@ -945,11 +947,22 @@
             AudioResampler                      *mResampler;
             // interleaved stereo pairs of fixed-point signed Q19.12
             int32_t                             *mRsmpOutBuffer;
-            int16_t                             *mRsmpInBuffer; // [mFrameCount * mChannelCount]
-            size_t                              mRsmpInIndex;
-            size_t                              mBufferSize;    // stream buffer size for read()
+
+            // resampler converts input at HAL Hz to output at AudioRecord client Hz
+            int16_t                             *mRsmpInBuffer; // see new[] for details on the size
+            size_t                              mRsmpInFrames;  // size of resampler input in frames
+            size_t                              mRsmpInFramesP2;// size rounded up to a power-of-2
+            size_t                              mRsmpInUnrel;   // unreleased frames remaining from
+                                                                // most recent getNextBuffer
+            // these are rolling counters that are never cleared
+            int32_t                             mRsmpInFront;   // next available frame
+            int32_t                             mRsmpInRear;    // last filled frame + 1
+            size_t                              mRsmpInIndex;   // FIXME legacy
+
+            // client's requested configuration, which may differ from the HAL configuration
             const uint32_t                      mReqChannelCount;
             const uint32_t                      mReqSampleRate;
+
             ssize_t                             mBytesRead;
             // sync event triggering actual audio capture. Frames read before this event will
             // be dropped and therefore not read by the application.
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index cd201d9..05fde7c 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -48,6 +48,7 @@
                                 int uid,
                                 bool isOut);
     virtual             ~TrackBase();
+    virtual status_t    initCheck() const { return getCblk() != 0 ? NO_ERROR : NO_MEMORY; }
 
     virtual status_t    start(AudioSystem::sync_event_t event,
                              int triggerSession) = 0;
@@ -78,15 +79,6 @@
 
     virtual uint32_t sampleRate() const { return mSampleRate; }
 
-    // Return a pointer to the start of a contiguous slice of the track buffer.
-    // Parameter 'offset' is the requested start position, expressed in
-    // monotonically increasing frame units relative to the track epoch.
-    // Parameter 'frames' is the requested length, also in frame units.
-    // Always returns non-NULL.  It is the caller's responsibility to
-    // verify that this will be successful; the result of calling this
-    // function with invalid 'offset' or 'frames' is undefined.
-    void* getBuffer(uint32_t offset, uint32_t frames) const;
-
     bool isStopped() const {
         return (mState == STOPPED || mState == FLUSHED);
     }
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index af04ce7..d5178b1 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -396,6 +396,15 @@
     }
 }
 
+status_t AudioFlinger::PlaybackThread::Track::initCheck() const
+{
+    status_t status = TrackBase::initCheck();
+    if (status == NO_ERROR && mName < 0) {
+        status = NO_MEMORY;
+    }
+    return status;
+}
+
 void AudioFlinger::PlaybackThread::Track::destroy()
 {
     // NOTE: destroyTrack_l() can remove a strong reference to this Track
@@ -1045,15 +1054,17 @@
 
         mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
                                               "AudioFlingerTimed");
-        if (mTimedMemoryDealer == NULL)
+        if (mTimedMemoryDealer == NULL) {
             return NO_MEMORY;
+        }
     }
 
     sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
     if (newBuffer == NULL) {
         newBuffer = mTimedMemoryDealer->allocate(size);
-        if (newBuffer == NULL)
+        if (newBuffer == NULL) {
             return NO_MEMORY;
+        }
     }
 
     *buffer = newBuffer;
@@ -1764,9 +1775,7 @@
 {
     ALOGV("RecordTrack constructor");
     if (mCblk != NULL) {
-        mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
-                mFrameSize);
-        mServerProxy = mAudioRecordServerProxy;
+        mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, mFrameSize);
     }
 }