Merge "Fix a bug on subtitle (SRT)." into jb-dev
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 6a5b45f..0362f39 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -608,6 +608,51 @@
     fprintf(stderr, "       -S allocate buffers from a surface\n");
     fprintf(stderr, "       -T allocate buffers from a surface texture\n");
     fprintf(stderr, "       -d(ump) filename (raw stream data to a file)\n");
+    fprintf(stderr, "       -D(ump) filename (decoded PCM data to a file)\n");
+}
+
+static void dumpCodecProfiles(const sp<IOMX>& omx, bool queryDecoders) {
+    const char *kMimeTypes[] = {
+        MEDIA_MIMETYPE_VIDEO_AVC, MEDIA_MIMETYPE_VIDEO_MPEG4,
+        MEDIA_MIMETYPE_VIDEO_H263, MEDIA_MIMETYPE_AUDIO_AAC,
+        MEDIA_MIMETYPE_AUDIO_AMR_NB, MEDIA_MIMETYPE_AUDIO_AMR_WB,
+        MEDIA_MIMETYPE_AUDIO_MPEG, MEDIA_MIMETYPE_AUDIO_G711_MLAW,
+        MEDIA_MIMETYPE_AUDIO_G711_ALAW, MEDIA_MIMETYPE_AUDIO_VORBIS,
+        MEDIA_MIMETYPE_VIDEO_VPX
+    };
+
+    const char *codecType = queryDecoders ? "decoder" : "encoder";
+    printf("%s profiles:\n", codecType);
+
+    for (size_t k = 0; k < sizeof(kMimeTypes) / sizeof(kMimeTypes[0]); ++k) {
+        printf("type '%s':\n", kMimeTypes[k]);
+
+        Vector<CodecCapabilities> results;
+        // will retrieve hardware and software codecs
+        CHECK_EQ(QueryCodecs(omx, kMimeTypes[k],
+                             queryDecoders,
+                             &results), (status_t)OK);
+
+        for (size_t i = 0; i < results.size(); ++i) {
+            printf("  %s '%s' supports ",
+                       codecType, results[i].mComponentName.string());
+
+            if (results[i].mProfileLevels.size() == 0) {
+                    printf("NOTHING.\n");
+                    continue;
+            }
+
+            for (size_t j = 0; j < results[i].mProfileLevels.size(); ++j) {
+                const CodecProfileLevel &profileLevel =
+                     results[i].mProfileLevels[j];
+
+                printf("%s%ld/%ld", j > 0 ? ", " : "",
+                    profileLevel.mProfile, profileLevel.mLevel);
+            }
+
+            printf("\n");
+        }
+    }
 }
 
 int main(int argc, char **argv) {
@@ -621,6 +666,7 @@
     bool useSurfaceAlloc = false;
     bool useSurfaceTexAlloc = false;
     bool dumpStream = false;
+    bool dumpPCMStream = false;
     String8 dumpStreamFilename;
     gNumRepetitions = 1;
     gMaxNumFrames = 0;
@@ -635,7 +681,7 @@
     sp<LiveSession> liveSession;
 
     int res;
-    while ((res = getopt(argc, argv, "han:lm:b:ptsrow:kxSTd:")) >= 0) {
+    while ((res = getopt(argc, argv, "han:lm:b:ptsrow:kxSTd:D:")) >= 0) {
         switch (res) {
             case 'a':
             {
@@ -650,6 +696,14 @@
                 break;
             }
 
+            case 'D':
+            {
+                dumpPCMStream = true;
+                audioOnly = true;
+                dumpStreamFilename.setTo(optarg);
+                break;
+            }
+
             case 'l':
             {
                 listComponents = true;
@@ -830,46 +884,8 @@
 
         sp<IOMX> omx = service->getOMX();
         CHECK(omx.get() != NULL);
-
-        const char *kMimeTypes[] = {
-            MEDIA_MIMETYPE_VIDEO_AVC, MEDIA_MIMETYPE_VIDEO_MPEG4,
-            MEDIA_MIMETYPE_VIDEO_H263, MEDIA_MIMETYPE_AUDIO_AAC,
-            MEDIA_MIMETYPE_AUDIO_AMR_NB, MEDIA_MIMETYPE_AUDIO_AMR_WB,
-            MEDIA_MIMETYPE_AUDIO_MPEG, MEDIA_MIMETYPE_AUDIO_G711_MLAW,
-            MEDIA_MIMETYPE_AUDIO_G711_ALAW, MEDIA_MIMETYPE_AUDIO_VORBIS,
-            MEDIA_MIMETYPE_VIDEO_VPX
-        };
-
-        for (size_t k = 0; k < sizeof(kMimeTypes) / sizeof(kMimeTypes[0]);
-             ++k) {
-            printf("type '%s':\n", kMimeTypes[k]);
-
-            Vector<CodecCapabilities> results;
-            // will retrieve hardware and software codecs
-            CHECK_EQ(QueryCodecs(omx, kMimeTypes[k],
-                                 true, // queryDecoders
-                                 &results), (status_t)OK);
-
-            for (size_t i = 0; i < results.size(); ++i) {
-                printf("  decoder '%s' supports ",
-                       results[i].mComponentName.string());
-
-                if (results[i].mProfileLevels.size() == 0) {
-                    printf("NOTHING.\n");
-                    continue;
-                }
-
-                for (size_t j = 0; j < results[i].mProfileLevels.size(); ++j) {
-                    const CodecProfileLevel &profileLevel =
-                        results[i].mProfileLevels[j];
-
-                    printf("%s%ld/%ld", j > 0 ? ", " : "",
-                           profileLevel.mProfile, profileLevel.mLevel);
-                }
-
-                printf("\n");
-            }
-        }
+        dumpCodecProfiles(omx, true /* queryDecoders */);
+        dumpCodecProfiles(omx, false /* queryDecoders */);
     }
 
     if (listComponents) {
@@ -1094,6 +1110,20 @@
             writeSourcesToMP4(mediaSources, syncInfoPresent);
         } else if (dumpStream) {
             dumpSource(mediaSource, dumpStreamFilename);
+        } else if (dumpPCMStream) {
+            OMXClient client;
+            CHECK_EQ(client.connect(), (status_t)OK);
+
+            sp<MediaSource> decSource =
+                OMXCodec::Create(
+                        client.interface(),
+                        mediaSource->getFormat(),
+                        false /* createEncoder */,
+                        mediaSource,
+                        0 /* matchComponentName */,
+                        0 /* flags */);
+
+            dumpSource(decSource, dumpStreamFilename);
         } else if (seekTest) {
             performSeekTest(mediaSource);
         } else {
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 30db642..6d304e0 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -75,8 +75,8 @@
     AUDIO_ENCODER_AMR_NB = 1,
     AUDIO_ENCODER_AMR_WB = 2,
     AUDIO_ENCODER_AAC = 3,
-    AUDIO_ENCODER_AAC_PLUS = 4,
-    AUDIO_ENCODER_EAAC_PLUS = 5,
+    AUDIO_ENCODER_HE_AAC = 4,
+    AUDIO_ENCODER_AAC_ELD = 5,
 
     AUDIO_ENCODER_LIST_END // must be the last - used to validate the audio encoder type
 };
diff --git a/include/media/stagefright/AACWriter.h b/include/media/stagefright/AACWriter.h
index 49397ee..df1b053 100644
--- a/include/media/stagefright/AACWriter.h
+++ b/include/media/stagefright/AACWriter.h
@@ -59,6 +59,7 @@
     int64_t mEstimatedDurationUs;
     int32_t mChannelCount;
     int32_t mSampleRate;
+    int32_t mAACProfile;
     int32_t mFrameDurationUs;
 
     static void *ThreadWrapper(void *);
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 7d7af63..72827c1 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -22,6 +22,7 @@
 #include <android/native_window.h>
 #include <media/IOMX.h>
 #include <media/stagefright/foundation/AHierarchicalStateMachine.h>
+#include <media/stagefright/SkipCutBuffer.h>
 #include <OMX_Audio.h>
 
 namespace android {
@@ -120,6 +121,9 @@
     sp<ExecutingToIdleState> mExecutingToIdleState;
     sp<IdleToLoadedState> mIdleToLoadedState;
     sp<FlushingState> mFlushingState;
+    int32_t mEncoderDelay;
+    int32_t mEncoderPadding;
+    sp<SkipCutBuffer> mSkipCutBuffer;
 
     AString mComponentName;
     uint32_t mFlags;
@@ -182,7 +186,7 @@
     status_t setupAACCodec(
             bool encoder,
             int32_t numChannels, int32_t sampleRate, int32_t bitRate,
-            bool isADTS);
+            int32_t aacProfile, bool isADTS);
 
     status_t selectAudioPortFormat(
             OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat);
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index 8a87d83..3c25a14 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -49,6 +49,7 @@
     kKeyFrameRate         = 'frmR',  // int32_t (video frame rate fps)
     kKeyBitRate           = 'brte',  // int32_t (bps)
     kKeyESDS              = 'esds',  // raw data
+    kKeyAACProfile        = 'aacp',  // int32_t
     kKeyAVCC              = 'avcc',  // raw data
     kKeyD263              = 'd263',  // raw data
     kKeyVorbisInfo        = 'vinf',  // raw data
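
A minimal sketch of how the new kKeyAACProfile entry is used elsewhere in this change, assuming the usual MetaData.h and OMX_Audio.h includes: StagefrightRecorder stores an OMX AAC object type under the key, and the codecs fall back to OMX_AUDIO_AACObjectNull when it is absent.

    sp<MetaData> encMeta = new MetaData;
    encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectHE);  // e.g. request HE-AAC

    int32_t aacProfile;
    if (!encMeta->findInt32(kKeyAACProfile, &aacProfile)) {
        aacProfile = OMX_AUDIO_AACObjectNull;  // absent: let the codec choose its default
    }
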
diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h
index e197134..c9c709c 100644
--- a/include/media/stagefright/NuMediaExtractor.h
+++ b/include/media/stagefright/NuMediaExtractor.h
@@ -18,6 +18,7 @@
 #define NU_MEDIA_EXTRACTOR_H_
 
 #include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/MediaSource.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
@@ -53,8 +54,12 @@
     status_t getTrackFormat(size_t index, sp<AMessage> *format) const;
 
     status_t selectTrack(size_t index);
+    status_t unselectTrack(size_t index);
 
-    status_t seekTo(int64_t timeUs);
+    status_t seekTo(
+            int64_t timeUs,
+            MediaSource::ReadOptions::SeekMode mode =
+                MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
 
     status_t advance();
     status_t readSampleData(const sp<ABuffer> &buffer);
@@ -93,7 +98,11 @@
     int64_t mTotalBitrate;  // in bits/sec
     int64_t mDurationUs;
 
-    ssize_t fetchTrackSamples(int64_t seekTimeUs = -1ll);
+    ssize_t fetchTrackSamples(
+            int64_t seekTimeUs = -1ll,
+            MediaSource::ReadOptions::SeekMode mode =
+                MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
+
     void releaseTrackSamples();
 
     bool getTotalBitrate(int64_t *bitRate) const;
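
A rough usage sketch for the extended NuMediaExtractor API, assuming a source has already been attached via the existing setDataSource() entry point (the clip path and track index are placeholders):

    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
    CHECK_EQ(extractor->setDataSource("/sdcard/clip.mp4"), (status_t)OK);
    CHECK_EQ(extractor->selectTrack(0), (status_t)OK);

    // Seek to 5 s; the new mode argument picks the sync sample at or before that time.
    extractor->seekTo(5000000ll, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);

    // ... readSampleData()/advance() loop ...

    extractor->unselectTrack(0);  // stops the track's MediaSource and drops any pending sample
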
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 055da5d..81350ca 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -204,7 +204,7 @@
     ReadOptions::SeekMode mSeekMode;
     int64_t mTargetTimeUs;
     bool mOutputPortSettingsChangedPending;
-    SkipCutBuffer *mSkipCutBuffer;
+    sp<SkipCutBuffer> mSkipCutBuffer;
 
     MediaBuffer *mLeftOverBuffer;
 
@@ -243,7 +243,7 @@
 
     status_t setAACFormat(
             int32_t numChannels, int32_t sampleRate, int32_t bitRate,
-            bool isADTS);
+            int32_t aacProfile, bool isADTS);
 
     void setG711Format(int32_t numChannels);
 
diff --git a/include/media/stagefright/SkipCutBuffer.h b/include/media/stagefright/SkipCutBuffer.h
index 27851ca..2653b53 100644
--- a/include/media/stagefright/SkipCutBuffer.h
+++ b/include/media/stagefright/SkipCutBuffer.h
@@ -27,12 +27,11 @@
  * utility class to cut the start and end off a stream of data in MediaBuffers
  *
  */
-class SkipCutBuffer {
+class SkipCutBuffer: public RefBase {
  public:
     // 'skip' is the number of bytes to skip from the beginning
     // 'cut' is the number of bytes to cut from the end
     SkipCutBuffer(int32_t skip, int32_t cut);
-    virtual ~SkipCutBuffer();
 
     // Submit one MediaBuffer for skipping and cutting. This may consume all or
     // some of the data in the buffer, or it may add data to it.
@@ -42,6 +41,9 @@
     void clear();
     size_t size(); // how many bytes are currently stored in the buffer
 
+ protected:
+    virtual ~SkipCutBuffer();
+
  private:
     void write(const char *src, size_t num);
     size_t read(char *dst, size_t num);
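
For context, a sketch of how the now reference-counted SkipCutBuffer is set up by the decoders in this change: the encoder delay and padding (in PCM frames, read from kKeyEncoderDelay/kKeyEncoderPadding) are converted to bytes of 16-bit PCM, and each decoded output buffer is passed through submit().

    int32_t delay = 0, padding = 0, numChannels = 0;
    meta->findInt32(kKeyEncoderDelay, &delay);
    meta->findInt32(kKeyEncoderPadding, &padding);
    meta->findInt32(kKeyChannelCount, &numChannels);

    size_t frameSize = numChannels * sizeof(int16_t);   // 16-bit samples assumed
    sp<SkipCutBuffer> skipCut =
            new SkipCutBuffer(delay * frameSize, padding * frameSize);

    // for each decoded output MediaBuffer:
    skipCut->submit(mediaBuffer);   // trims leading delay and trailing padding bytes
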
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index b1be8b1..cd419bd 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -54,6 +54,12 @@
         audio_stream_type_t streamType,
         uint32_t sampleRate)
 {
+    // FIXME merge with similar code in createTrack_l(), except we're missing
+    //       some information here that is available in createTrack_l():
+    //          audio_io_handle_t output
+    //          audio_format_t format
+    //          audio_channel_mask_t channelMask
+    //          audio_output_flags_t flags
     int afSampleRate;
     if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
         return NO_INIT;
@@ -201,11 +207,11 @@
         streamType = AUDIO_STREAM_MUSIC;
     }
 
-    int afSampleRate;
-    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
-        return NO_INIT;
-    }
     if (sampleRate == 0) {
+        int afSampleRate;
+        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
+            return NO_INIT;
+        }
         sampleRate = afSampleRate;
     }
 
@@ -223,6 +229,12 @@
         return BAD_VALUE;
     }
 
+    // AudioFlinger does not currently support 8-bit data in shared memory
+    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
+        ALOGE("8-bit data in shared memory is not supported");
+        return BAD_VALUE;
+    }
+
     // force direct flag if format is not linear PCM
     if (!audio_is_linear_pcm(format)) {
         flags = (audio_output_flags_t)
@@ -744,14 +756,6 @@
         return NO_INIT;
     }
 
-    int afSampleRate;
-    if (AudioSystem::getSamplingRate(output, streamType, &afSampleRate) != NO_ERROR) {
-        return NO_INIT;
-    }
-    int afFrameCount;
-    if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) {
-        return NO_INIT;
-    }
     uint32_t afLatency;
     if (AudioSystem::getLatency(output, streamType, &afLatency) != NO_ERROR) {
         return NO_INIT;
@@ -768,14 +772,57 @@
         ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
         flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
     }
-    ALOGV("createTrack_l() output %d afFrameCount %d afLatency %d", output, afFrameCount, afLatency);
+    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
 
     mNotificationFramesAct = mNotificationFramesReq;
+
     if (!audio_is_linear_pcm(format)) {
+
         if (sharedBuffer != 0) {
+            // Same comment as below about ignoring frameCount parameter for set()
             frameCount = sharedBuffer->size();
+        } else if (frameCount == 0) {
+            int afFrameCount;
+            if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) {
+                return NO_INIT;
+            }
+            frameCount = afFrameCount;
         }
-    } else {
+
+    } else if (sharedBuffer != 0) {
+
+        // Ensure that buffer alignment matches channelCount
+        int channelCount = popcount(channelMask);
+        // 8-bit data in shared memory is not currently supported by AudioFlinger
+        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
+        if (channelCount > 1) {
+            // More than 2 channels does not require stronger alignment than stereo
+            alignment <<= 1;
+        }
+        if (((uint32_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
+            ALOGE("Invalid buffer alignment: address %p, channelCount %d",
+                    sharedBuffer->pointer(), channelCount);
+            return BAD_VALUE;
+        }
+
+        // When initializing a shared buffer AudioTrack via constructors,
+        // there's no frameCount parameter.
+        // But when initializing a shared buffer AudioTrack via set(),
+        // there _is_ a frameCount parameter.  We silently ignore it.
+        frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t);
+
+    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
+
+        // FIXME move these calculations and associated checks to server
+        int afSampleRate;
+        if (AudioSystem::getSamplingRate(output, streamType, &afSampleRate) != NO_ERROR) {
+            return NO_INIT;
+        }
+        int afFrameCount;
+        if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) {
+            return NO_INIT;
+        }
+
         // Ensure that buffer depth covers at least audio hardware latency
         uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
         if (minBufCount < 2) minBufCount = 2;
@@ -784,38 +831,27 @@
         ALOGV("minFrameCount: %d, afFrameCount=%d, minBufCount=%d, sampleRate=%d, afSampleRate=%d"
                 ", afLatency=%d",
                 minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
-#define MIN_FRAME_COUNT_FAST 128    // FIXME hard-coded
-        if ((flags & AUDIO_OUTPUT_FLAG_FAST) && (minFrameCount > MIN_FRAME_COUNT_FAST)) {
-            minFrameCount = MIN_FRAME_COUNT_FAST;
+
+        if (frameCount == 0) {
+            frameCount = minFrameCount;
+        }
+        if (mNotificationFramesAct == 0) {
+            mNotificationFramesAct = frameCount/2;
+        }
+        // Make sure that application is notified with sufficient margin
+        // before underrun
+        if (mNotificationFramesAct > (uint32_t)frameCount/2) {
+            mNotificationFramesAct = frameCount/2;
+        }
+        if (frameCount < minFrameCount) {
+            // not ALOGW because it happens all the time when playing key clicks over A2DP
+            ALOGV("Minimum buffer size corrected from %d to %d",
+                     frameCount, minFrameCount);
+            frameCount = minFrameCount;
         }
 
-        if (sharedBuffer == 0) {
-            if (frameCount == 0) {
-                frameCount = minFrameCount;
-            }
-            if (mNotificationFramesAct == 0) {
-                mNotificationFramesAct = frameCount/2;
-            }
-            // Make sure that application is notified with sufficient margin
-            // before underrun
-            if (mNotificationFramesAct > (uint32_t)frameCount/2) {
-                mNotificationFramesAct = frameCount/2;
-            }
-            if (frameCount < minFrameCount) {
-                // not ALOGW because it happens all the time when playing key clicks over A2DP
-                ALOGV("Minimum buffer size corrected from %d to %d",
-                         frameCount, minFrameCount);
-                frameCount = minFrameCount;
-            }
-        } else {
-            // Ensure that buffer alignment matches channelCount
-            int channelCount = popcount(channelMask);
-            if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) {
-                ALOGE("Invalid buffer alignement: address %p, channelCount %d", sharedBuffer->pointer(), channelCount);
-                return BAD_VALUE;
-            }
-            frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t);
-        }
+    } else {
+        // For fast tracks, the frame count calculations and checks are done by server
     }
 
     IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
@@ -864,6 +900,9 @@
         } else {
             ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", mCblk->frameCount);
         }
+        if (sharedBuffer == 0) {
+            mNotificationFramesAct = mCblk->frameCount/2;
+        }
     }
     if (sharedBuffer == 0) {
         mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
@@ -879,6 +918,7 @@
     mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
     mCblk->waitTimeMs = 0;
     mRemainingFrames = mNotificationFramesAct;
+    // FIXME don't believe this lie
     mLatency = afLatency + (1000*mCblk->frameCount) / sampleRate;
     return NO_ERROR;
 }
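
To illustrate the buffer-count math that now runs only for normal streaming PCM tracks (not shared-memory, not fast tracks): with hypothetical mixer values afFrameCount = 1024, afSampleRate = 44100 and afLatency = 92 ms, one mixer period is (1000 * 1024) / 44100 = 23 ms (integer division), so minBufCount = 92 / 23 = 4 buffers; the code then clamps minBufCount to at least 2 before deriving minFrameCount.
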
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index c224f06..6929efa 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -41,9 +41,11 @@
 };
 
 const MediaProfiles::NameToTagMap MediaProfiles::sAudioEncoderNameMap[] = {
-    {"amrnb", AUDIO_ENCODER_AMR_NB},
-    {"amrwb", AUDIO_ENCODER_AMR_WB},
-    {"aac",   AUDIO_ENCODER_AAC},
+    {"amrnb",  AUDIO_ENCODER_AMR_NB},
+    {"amrwb",  AUDIO_ENCODER_AMR_WB},
+    {"aac",    AUDIO_ENCODER_AAC},
+    {"heaac",  AUDIO_ENCODER_HE_AAC},
+    {"aaceld", AUDIO_ENCODER_AAC_ELD}
 };
 
 const MediaProfiles::NameToTagMap MediaProfiles::sFileFormatMap[] = {
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index eac71c5..253602d 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -1024,7 +1024,7 @@
                       AUDIO_FORMAT_PCM_16_BIT,
                       AUDIO_CHANNEL_OUT_MONO,
                       0,    // frameCount
-                      AUDIO_OUTPUT_FLAG_NONE,
+                      AUDIO_OUTPUT_FLAG_FAST,
                       audioCallback,
                       this, // user
                       0,    // notificationFrames
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 2c5644f..727fd0d 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -24,6 +24,7 @@
 #include <binder/IServiceManager.h>
 
 #include <media/IMediaPlayerService.h>
+#include <media/openmax/OMX_Audio.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/AudioSource.h>
 #include <media/stagefright/AMRWriter.h>
@@ -817,7 +818,17 @@
             break;
         case AUDIO_ENCODER_AAC:
             mime = MEDIA_MIMETYPE_AUDIO_AAC;
+            encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectLC);
             break;
+        case AUDIO_ENCODER_HE_AAC:
+            mime = MEDIA_MIMETYPE_AUDIO_AAC;
+            encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectHE);
+            break;
+        case AUDIO_ENCODER_AAC_ELD:
+            mime = MEDIA_MIMETYPE_AUDIO_AAC;
+            encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectELD);
+            break;
+
         default:
             ALOGE("Unknown audio encoder: %d", mAudioEncoder);
             return NULL;
@@ -838,7 +849,6 @@
 
     OMXClient client;
     CHECK_EQ(client.connect(), (status_t)OK);
-
     sp<MediaSource> audioEncoder =
         OMXCodec::Create(client.interface(), encMeta,
                          true /* createEncoder */, audioSource);
@@ -852,7 +862,9 @@
     // Add support for OUTPUT_FORMAT_AAC_ADIF
     CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_AAC_ADTS);
 
-    CHECK_EQ(mAudioEncoder, AUDIO_ENCODER_AAC);
+    CHECK(mAudioEncoder == AUDIO_ENCODER_AAC ||
+          mAudioEncoder == AUDIO_ENCODER_HE_AAC ||
+          mAudioEncoder == AUDIO_ENCODER_AAC_ELD);
     CHECK(mAudioSource != AUDIO_SOURCE_CNT);
 
     mWriter = new AACWriter(mOutputFd);
@@ -970,7 +982,9 @@
     sp<MediaWriter> writer = new MPEG2TSWriter(mOutputFd);
 
     if (mAudioSource != AUDIO_SOURCE_CNT) {
-        if (mAudioEncoder != AUDIO_ENCODER_AAC) {
+        if (mAudioEncoder != AUDIO_ENCODER_AAC &&
+            mAudioEncoder != AUDIO_ENCODER_HE_AAC &&
+            mAudioEncoder != AUDIO_ENCODER_AAC_ELD) {
             return ERROR_UNSUPPORTED;
         }
 
@@ -1435,6 +1449,8 @@
         case AUDIO_ENCODER_AMR_NB:
         case AUDIO_ENCODER_AMR_WB:
         case AUDIO_ENCODER_AAC:
+        case AUDIO_ENCODER_HE_AAC:
+        case AUDIO_ENCODER_AAC_ELD:
             break;
 
         default:
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index f1467c4..2a770cd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -39,7 +39,6 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/SkipCutBuffer.h>
 #include <gui/ISurfaceTexture.h>
 
 #include "avc_utils.h"
@@ -64,13 +63,10 @@
       mSkipRenderingVideoUntilMediaTimeUs(-1ll),
       mVideoLateByUs(0ll),
       mNumFramesTotal(0ll),
-      mNumFramesDropped(0ll),
-      mSkipCutBuffer(NULL) {
+      mNumFramesDropped(0ll) {
 }
 
 NuPlayer::~NuPlayer() {
-    delete mSkipCutBuffer;
-    mSkipCutBuffer = NULL;
 }
 
 void NuPlayer::setUID(uid_t uid) {
@@ -238,32 +234,6 @@
 
             mSource->start();
 
-            sp<MetaData> meta = mSource->getFormat(true /* audio */);
-            if (meta != NULL) {
-                int32_t delay = 0;
-                if (!meta->findInt32(kKeyEncoderDelay, &delay)) {
-                    delay = 0;
-                }
-                int32_t padding = 0;
-                if (!meta->findInt32(kKeyEncoderPadding, &padding)) {
-                    padding = 0;
-                }
-                int32_t numchannels = 0;
-                if (delay + padding) {
-                    if (meta->findInt32(kKeyChannelCount, &numchannels)) {
-                        size_t frameSize = numchannels * sizeof(int16_t);
-                        if (mSkipCutBuffer) {
-                            size_t prevbuffersize = mSkipCutBuffer->size();
-                            if (prevbuffersize != 0) {
-                                ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbuffersize);
-                            }
-                            delete mSkipCutBuffer;
-                        }
-                        mSkipCutBuffer = new SkipCutBuffer(delay * frameSize, padding * frameSize);
-                    }
-                }
-            }
-
             mRenderer = new Renderer(
                     mAudioSink,
                     new AMessage(kWhatRendererNotify, id()));
@@ -892,10 +862,6 @@
         skipUntilMediaTimeUs = -1;
     }
 
-    if (audio && mSkipCutBuffer) {
-        mSkipCutBuffer->submit(buffer);
-    }
-
     mRenderer->queueBuffer(audio, buffer, reply);
 }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index f917f64..25766e0 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -27,7 +27,6 @@
 struct ACodec;
 struct MetaData;
 struct NuPlayerDriver;
-class SkipCutBuffer;
 
 struct NuPlayer : public AHandler {
     NuPlayer();
@@ -129,8 +128,6 @@
     int64_t mVideoLateByUs;
     int64_t mNumFramesTotal, mNumFramesDropped;
 
-    SkipCutBuffer *mSkipCutBuffer;
-
     status_t instantiateDecoder(bool audio, sp<Decoder> *decoder);
 
     status_t feedDecoderInputData(bool audio, const sp<AMessage> &msg);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 25974b6..d18d146 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -124,6 +124,15 @@
         msg->setInt32("channel-count", numChannels);
         msg->setInt32("sample-rate", sampleRate);
 
+        int32_t delay = 0;
+        if (meta->findInt32(kKeyEncoderDelay, &delay)) {
+            msg->setInt32("encoder-delay", delay);
+        }
+        int32_t padding = 0;
+        if (meta->findInt32(kKeyEncoderPadding, &padding)) {
+            msg->setInt32("encoder-padding", padding);
+        }
+
         int32_t isADTS;
         if (meta->findInt32(kKeyIsADTS, &isADTS) && isADTS != 0) {
             msg->setInt32("is-adts", true);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 253bc2f..441cbf3 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -103,7 +103,7 @@
 }
 
 status_t NuPlayerDriver::prepare() {
-    sendEvent(MEDIA_SET_VIDEO_SIZE, 320, 240);
+    sendEvent(MEDIA_SET_VIDEO_SIZE, 0, 0);
     return OK;
 }
 
diff --git a/media/libstagefright/AACWriter.cpp b/media/libstagefright/AACWriter.cpp
index 9cdb463..21c5428 100644
--- a/media/libstagefright/AACWriter.cpp
+++ b/media/libstagefright/AACWriter.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "AACWriter"
 #include <utils/Log.h>
 
+#include <media/openmax/OMX_Audio.h>
 #include <media/stagefright/AACWriter.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -38,7 +39,8 @@
       mPaused(false),
       mResumed(false),
       mChannelCount(-1),
-      mSampleRate(-1) {
+      mSampleRate(-1),
+      mAACProfile(OMX_AUDIO_AACObjectLC) {
 
     ALOGV("AACWriter Constructor");
 
@@ -96,6 +98,7 @@
     CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC));
     CHECK(meta->findInt32(kKeyChannelCount, &mChannelCount));
     CHECK(meta->findInt32(kKeySampleRate, &mSampleRate));
+    CHECK(meta->findInt32(kKeyAACProfile, &mAACProfile));
     CHECK(mChannelCount >= 1 && mChannelCount <= 2);
 
     mSource = source;
@@ -254,7 +257,7 @@
     data |= kProtectionAbsense;
     write(mFd, &data, 1);
 
-    const uint8_t kProfileCode = 1;  // AAC-LC
+    const uint8_t kProfileCode = mAACProfile - 1;  // ADTS 'profile' is the Audio Object Type minus 1
     uint8_t kSampleFreqIndex;
     CHECK(getSampleRateTableIndex(mSampleRate, &kSampleFreqIndex));
     const uint8_t kPrivateStream = 0;
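
For reference, the previously hard-coded value 1 therefore meant AAC-LC (Audio Object Type 2); since StagefrightRecorder writes OMX_AUDIO_AACObjectLC (= 2) under kKeyAACProfile for plain AAC, the existing AAC-LC path produces the same ADTS header bytes as before.
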
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 5ac34c9..0de2d0a 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -852,13 +852,16 @@
                 || !msg->findInt32("sample-rate", &sampleRate)) {
             err = INVALID_OPERATION;
         } else {
-            int32_t isADTS;
+            int32_t isADTS, aacProfile;
             if (!msg->findInt32("is-adts", &isADTS)) {
                 isADTS = 0;
             }
+            if (!msg->findInt32("aac-profile", &aacProfile)) {
+                aacProfile = OMX_AUDIO_AACObjectNull;
+            }
 
             err = setupAACCodec(
-                    encoder, numChannels, sampleRate, bitRate, isADTS != 0);
+                    encoder, numChannels, sampleRate, bitRate, aacProfile, isADTS != 0);
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
         err = setupAMRCodec(encoder, false /* isWAMR */, bitRate);
@@ -886,6 +889,13 @@
         }
     }
 
+    if (!msg->findInt32("encoder-delay", &mEncoderDelay)) {
+        mEncoderDelay = 0;
+    }
+    if (!msg->findInt32("encoder-padding", &mEncoderPadding)) {
+        mEncoderPadding = 0;
+    }
+
     int32_t maxInputSize;
     if (msg->findInt32("max-input-size", &maxInputSize)) {
         err = setMinBufferSize(kPortIndexInput, (size_t)maxInputSize);
@@ -960,8 +970,8 @@
 }
 
 status_t ACodec::setupAACCodec(
-        bool encoder,
-        int32_t numChannels, int32_t sampleRate, int32_t bitRate, bool isADTS) {
+        bool encoder, int32_t numChannels, int32_t sampleRate,
+        int32_t bitRate, int32_t aacProfile, bool isADTS) {
     if (encoder && isADTS) {
         return -EINVAL;
     }
@@ -1026,7 +1036,7 @@
         profile.nFrameLength = 0;
         profile.nAACtools = OMX_AUDIO_AACToolAll;
         profile.nAACERtools = OMX_AUDIO_AACERNone;
-        profile.eAACProfile = OMX_AUDIO_AACObjectLC;
+        profile.eAACProfile = (OMX_AUDIO_AACPROFILETYPE) aacProfile;
         profile.eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF;
 
         err = mOMX->setParameter(
@@ -2000,6 +2010,17 @@
             notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
             notify->setInt32("channel-count", params.nChannels);
             notify->setInt32("sample-rate", params.nSamplingRate);
+            if (mEncoderDelay + mEncoderPadding) {
+                size_t frameSize = params.nChannels * sizeof(int16_t);
+                if (mSkipCutBuffer != NULL) {
+                    size_t prevbufsize = mSkipCutBuffer->size();
+                    if (prevbufsize != 0) {
+                        ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbufsize);
+                    }
+                }
+                mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay * frameSize,
+                                                   mEncoderPadding * frameSize);
+            }
             break;
         }
 
@@ -2414,6 +2435,9 @@
                 info->mData->setRange(rangeOffset, rangeLength);
             }
 
+            if (mCodec->mSkipCutBuffer != NULL) {
+                mCodec->mSkipCutBuffer->submit(info->mData);
+            }
             info->mData->meta()->setInt64("timeUs", timeUs);
 
             sp<AMessage> notify = mCodec->mNotify->dup();
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 2169cac..e2e5091 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -91,8 +91,6 @@
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_color_conversion \
         libstagefright_aacenc \
-        libstagefright_avcenc \
-        libstagefright_m4vh263enc \
         libstagefright_matroska \
         libstagefright_timedtext \
         libvpx \
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index b15cb67..1387e74 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -1345,7 +1345,7 @@
 }
 
 void AwesomePlayer::addTextSource(size_t trackIndex, const sp<MediaSource>& source) {
-    Mutex::Autolock autoLock(mTimedTextLock);
+    Mutex::Autolock autoLock(mLock);
     CHECK(source != NULL);
 
     if (mTextDriver == NULL) {
@@ -1395,7 +1395,6 @@
     if (mAudioSource != NULL) {
         Mutex::Autolock autoLock(mStatsLock);
         TrackStat *stat = &mStats.mTracks.editItemAt(mStats.mAudioTrackIndex);
-
         const char *component;
         if (!mAudioSource->getFormat()
                 ->findCString(kKeyDecoderComponent, &component)) {
@@ -2268,13 +2267,13 @@
 }
 
 status_t AwesomePlayer::getTrackInfo(Parcel *reply) const {
-    Mutex::Autolock autoLock(mTimedTextLock);
-    if (mTextDriver == NULL) {
-        return INVALID_OPERATION;
+    Mutex::Autolock autoLock(mLock);
+    size_t trackCount = mExtractor->countTracks();
+    if (mTextDriver != NULL) {
+        trackCount += mTextDriver->countExternalTracks();
     }
 
-    reply->writeInt32(mTextDriver->countExternalTracks() +
-                mExtractor->countTracks());
+    reply->writeInt32(trackCount);
     for (size_t i = 0; i < mExtractor->countTracks(); ++i) {
         sp<MetaData> meta = mExtractor->getTrackMetaData(i);
 
@@ -2296,28 +2295,31 @@
         }
 
         const char *lang;
-        if (meta->findCString(kKeyMediaLanguage, &lang)) {
-            reply->writeString16(String16(lang));
-        } else {
-            reply->writeString16(String16(""));
+        if (!meta->findCString(kKeyMediaLanguage, &lang)) {
+            lang = "und";
         }
+        reply->writeString16(String16(lang));
     }
 
-    mTextDriver->getExternalTrackInfo(reply);
+    if (mTextDriver != NULL) {
+        mTextDriver->getExternalTrackInfo(reply);
+    }
     return OK;
 }
 
 // FIXME:
 // At present, only timed text track is able to be selected or unselected.
 status_t AwesomePlayer::selectTrack(size_t trackIndex, bool select) {
-    Mutex::Autolock autoLock(mTimedTextLock);
-    if (mTextDriver == NULL) {
-        return INVALID_OPERATION;
+    ALOGV("selectTrack: trackIndex = %d and select=%d", trackIndex, select);
+    Mutex::Autolock autoLock(mLock);
+    size_t trackCount = mExtractor->countTracks();
+    if (mTextDriver != NULL) {
+        trackCount += mTextDriver->countExternalTracks();
     }
 
-    if (trackIndex >= mExtractor->countTracks()
-                + mTextDriver->countExternalTracks()) {
-        return BAD_VALUE;
+    if (trackIndex >= trackCount) {
+        ALOGE("Track index (%d) is out of range [0, %d)", trackIndex, trackCount);
+        return ERROR_OUT_OF_RANGE;
     }
 
     if (trackIndex < mExtractor->countTracks()) {
@@ -2331,6 +2333,11 @@
         }
     }
 
+    // Timed text track handling
+    if (mTextDriver == NULL) {
+        return INVALID_OPERATION;
+    }
+
     status_t err = OK;
     if (select) {
         err = mTextDriver->selectTrack(trackIndex);
@@ -2371,7 +2378,7 @@
         }
         case INVOKE_ID_ADD_EXTERNAL_SOURCE:
         {
-            Mutex::Autolock autoLock(mTimedTextLock);
+            Mutex::Autolock autoLock(mLock);
             if (mTextDriver == NULL) {
                 mTextDriver = new TimedTextDriver(mListener);
             }
@@ -2383,7 +2390,7 @@
         }
         case INVOKE_ID_ADD_EXTERNAL_SOURCE_FD:
         {
-            Mutex::Autolock autoLock(mTimedTextLock);
+            Mutex::Autolock autoLock(mLock);
             if (mTextDriver == NULL) {
                 mTextDriver = new TimedTextDriver(mListener);
             }
@@ -2398,12 +2405,12 @@
         case INVOKE_ID_SELECT_TRACK:
         {
             int trackIndex = request.readInt32();
-            return selectTrack(trackIndex, true);
+            return selectTrack(trackIndex, true /* select */);
         }
         case INVOKE_ID_UNSELECT_TRACK:
         {
             int trackIndex = request.readInt32();
-            return selectTrack(trackIndex, false);
+            return selectTrack(trackIndex, false /* select */);
         }
         default:
         {
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 9385b8a..a572541 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -28,6 +28,7 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/DataSource.h>
@@ -1824,26 +1825,23 @@
         return ERROR_MALFORMED;
     }
 
-    uint32_t objectType = csd[0] >> 3;
+    ABitReader br(csd, csd_size);
+    uint32_t objectType = br.getBits(5);
 
-    if (objectType == 31) {
-        return ERROR_UNSUPPORTED;
+    if (objectType == 31) {  // escape: AOT = 32 + the next 6 bits (e.g. AAC-ELD = 39)
+        objectType = 32 + br.getBits(6);
     }
 
-    uint32_t freqIndex = (csd[0] & 7) << 1 | (csd[1] >> 7);
+    uint32_t freqIndex = br.getBits(4);
+
     int32_t sampleRate = 0;
     int32_t numChannels = 0;
     if (freqIndex == 15) {
         if (csd_size < 5) {
             return ERROR_MALFORMED;
         }
-
-        sampleRate = (csd[1] & 0x7f) << 17
-                        | csd[2] << 9
-                        | csd[3] << 1
-                        | (csd[4] >> 7);
-
-        numChannels = (csd[4] >> 3) & 15;
+        sampleRate = br.getBits(24);
+        numChannels = br.getBits(4);
     } else {
         static uint32_t kSamplingRate[] = {
             96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
@@ -1855,7 +1853,7 @@
         }
 
         sampleRate = kSamplingRate[freqIndex];
-        numChannels = (csd[1] >> 3) & 15;
+        numChannels = br.getBits(4);
     }
 
     if (numChannels == 0) {
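
The rewritten parser simply walks the AudioSpecificConfig bit layout from ISO/IEC 14496-3, which is easier to express with ABitReader than with manual shifts and masks:

    audioObjectType         : 5 bits  (31 is an escape: AOT = 32 + the next 6 bits, e.g. AAC-ELD = 39)
    samplingFrequencyIndex  : 4 bits  (15 is an escape: a 24-bit explicit sampling frequency follows)
    channelConfiguration    : 4 bits
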
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index c39aa77..9f6d4a3 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -61,11 +61,6 @@
         // These are currently still used by the video editing suite.
 
         addMediaCodec(true /* encoder */, "AACEncoder", "audio/mp4a-latm");
-        addMediaCodec(true /* encoder */, "AVCEncoder", "video/avc");
-
-        addMediaCodec(true /* encoder */, "M4vH263Encoder");
-        addType("video/3gpp");
-        addType("video/mp4v-es");
 
         addMediaCodec(
                 false /* encoder */, "OMX.google.raw.decoder", "audio/raw");
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 7cfb8ea..d6075cd 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -98,6 +98,20 @@
         return ERROR_UNSUPPORTED;
     }
 
+    sp<MetaData> fileMeta = mImpl->getMetaData();
+    const char *containerMime;
+    if (fileMeta != NULL
+            && fileMeta->findCString(kKeyMIMEType, &containerMime)
+            && !strcasecmp(containerMime, "video/wvm")) {
+        // We always want to use "cryptoPluginMode" when using the wvm
+        // extractor. We can tell that it is this extractor by looking
+        // at the container mime type.
+        // The cryptoPluginMode ensures that the extractor will actually
+        // give us data in a call to MediaSource::read(), unlike its
+        // default mode that we use from AwesomePlayer.
+        static_cast<WVMExtractor *>(mImpl.get())->setCryptoPluginMode(true);
+    }
+
     mDataSource = dataSource;
 
     updateDurationAndBitrate();
@@ -206,6 +220,15 @@
         msg->setInt32("channel-count", numChannels);
         msg->setInt32("sample-rate", sampleRate);
 
+        int32_t delay = 0;
+        if (meta->findInt32(kKeyEncoderDelay, &delay)) {
+            msg->setInt32("encoder-delay", delay);
+        }
+        int32_t padding = 0;
+        if (meta->findInt32(kKeyEncoderPadding, &padding)) {
+            msg->setInt32("encoder-padding", padding);
+        }
+
         int32_t isADTS;
         if (meta->findInt32(kKeyIsADTS, &isADTS)) {
             msg->setInt32("is-adts", true);
@@ -384,6 +407,47 @@
     return OK;
 }
 
+status_t NuMediaExtractor::unselectTrack(size_t index) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mImpl == NULL) {
+        return -EINVAL;
+    }
+
+    if (index >= mImpl->countTracks()) {
+        return -ERANGE;
+    }
+
+    size_t i;
+    for (i = 0; i < mSelectedTracks.size(); ++i) {
+        TrackInfo *info = &mSelectedTracks.editItemAt(i);
+
+        if (info->mTrackIndex == index) {
+            break;
+        }
+    }
+
+    if (i == mSelectedTracks.size()) {
+        // Not selected.
+        return OK;
+    }
+
+    TrackInfo *info = &mSelectedTracks.editItemAt(i);
+
+    if (info->mSample != NULL) {
+        info->mSample->release();
+        info->mSample = NULL;
+
+        info->mSampleTimeUs = -1ll;
+    }
+
+    CHECK_EQ((status_t)OK, info->mSource->stop());
+
+    mSelectedTracks.removeAt(i);
+
+    return OK;
+}
+
 void NuMediaExtractor::releaseTrackSamples() {
     for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
         TrackInfo *info = &mSelectedTracks.editItemAt(i);
@@ -397,7 +461,8 @@
     }
 }
 
-ssize_t NuMediaExtractor::fetchTrackSamples(int64_t seekTimeUs) {
+ssize_t NuMediaExtractor::fetchTrackSamples(
+        int64_t seekTimeUs, MediaSource::ReadOptions::SeekMode mode) {
     TrackInfo *minInfo = NULL;
     ssize_t minIndex = -1;
 
@@ -419,7 +484,7 @@
         if (info->mSample == NULL) {
             MediaSource::ReadOptions options;
             if (seekTimeUs >= 0ll) {
-                options.setSeekTo(seekTimeUs);
+                options.setSeekTo(seekTimeUs, mode);
             }
             status_t err = info->mSource->read(&info->mSample, &options);
 
@@ -445,10 +510,11 @@
     return minIndex;
 }
 
-status_t NuMediaExtractor::seekTo(int64_t timeUs) {
+status_t NuMediaExtractor::seekTo(
+        int64_t timeUs, MediaSource::ReadOptions::SeekMode mode) {
     Mutex::Autolock autoLock(mLock);
 
-    ssize_t minIndex = fetchTrackSamples(timeUs);
+    ssize_t minIndex = fetchTrackSamples(timeUs, mode);
 
     if (minIndex < 0) {
         return ERROR_END_OF_STREAM;
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 1d6f927..791e044 100755
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -19,8 +19,6 @@
 #include <utils/Log.h>
 
 #include "include/AACEncoder.h"
-#include "include/AVCEncoder.h"
-#include "include/M4vH263Encoder.h"
 
 #include "include/ESDS.h"
 
@@ -67,8 +65,6 @@
 #define FACTORY_REF(name) { #name, Make##name },
 
 FACTORY_CREATE_ENCODER(AACEncoder)
-FACTORY_CREATE_ENCODER(AVCEncoder)
-FACTORY_CREATE_ENCODER(M4vH263Encoder)
 
 static sp<MediaSource> InstantiateSoftwareEncoder(
         const char *name, const sp<MediaSource> &source,
@@ -80,8 +76,6 @@
 
     static const FactoryInfo kFactoryInfo[] = {
         FACTORY_REF(AACEncoder)
-        FACTORY_REF(AVCEncoder)
-        FACTORY_REF(M4vH263Encoder)
     };
     for (size_t i = 0;
          i < sizeof(kFactoryInfo) / sizeof(kFactoryInfo[0]); ++i) {
@@ -511,16 +505,20 @@
     } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, mMIME)) {
         setAMRFormat(true /* isWAMR */, bitRate);
     } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mMIME)) {
-        int32_t numChannels, sampleRate;
+        int32_t numChannels, sampleRate, aacProfile;
         CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
         CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
 
+        if (!meta->findInt32(kKeyAACProfile, &aacProfile)) {
+            aacProfile = OMX_AUDIO_AACObjectNull;
+        }
+
         int32_t isADTS;
         if (!meta->findInt32(kKeyIsADTS, &isADTS)) {
             isADTS = false;
         }
 
-        status_t err = setAACFormat(numChannels, sampleRate, bitRate, isADTS);
+        status_t err = setAACFormat(numChannels, sampleRate, bitRate, aacProfile, isADTS);
         if (err != OK) {
             CODEC_LOGE("setAACFormat() failed (err = %d)", err);
             return err;
@@ -1430,9 +1428,6 @@
 
     free(mMIME);
     mMIME = NULL;
-
-    delete mSkipCutBuffer;
-    mSkipCutBuffer = NULL;
 }
 
 status_t OMXCodec::init() {
@@ -1606,14 +1601,13 @@
         }
         int32_t numchannels = 0;
         if (delay + padding) {
-            if (meta->findInt32(kKeyChannelCount, &numchannels)) {
+            if (mOutputFormat->findInt32(kKeyChannelCount, &numchannels)) {
                 size_t frameSize = numchannels * sizeof(int16_t);
-                if (mSkipCutBuffer) {
+                if (mSkipCutBuffer != NULL) {
                     size_t prevbuffersize = mSkipCutBuffer->size();
                     if (prevbuffersize != 0) {
                         ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbuffersize);
                     }
-                    delete mSkipCutBuffer;
                 }
                 mSkipCutBuffer = new SkipCutBuffer(delay * frameSize, padding * frameSize);
             }
@@ -2537,7 +2531,7 @@
             CHECK_EQ(countBuffersWeOwn(mPortBuffers[portIndex]),
                      mPortBuffers[portIndex].size());
 
-            if (mSkipCutBuffer && mPortStatus[kPortIndexOutput] == ENABLED) {
+            if (mSkipCutBuffer != NULL && mPortStatus[kPortIndexOutput] == ENABLED) {
                 mSkipCutBuffer->clear();
             }
 
@@ -3395,7 +3389,7 @@
 }
 
 status_t OMXCodec::setAACFormat(
-        int32_t numChannels, int32_t sampleRate, int32_t bitRate, bool isADTS) {
+        int32_t numChannels, int32_t sampleRate, int32_t bitRate, int32_t aacProfile, bool isADTS) {
     if (numChannels > 2) {
         ALOGW("Number of channels: (%d) \n", numChannels);
     }
@@ -3453,7 +3447,7 @@
         profile.nFrameLength = 0;
         profile.nAACtools = OMX_AUDIO_AACToolAll;
         profile.nAACERtools = OMX_AUDIO_AACERNone;
-        profile.eAACProfile = OMX_AUDIO_AACObjectLC;
+        profile.eAACProfile = (OMX_AUDIO_AACPROFILETYPE) aacProfile;
         profile.eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF;
         err = mOMX->setParameter(mNode, OMX_IndexParamAudioAac,
                 &profile, sizeof(profile));
@@ -3859,7 +3853,7 @@
     info->mStatus = OWNED_BY_CLIENT;
 
     info->mMediaBuffer->add_ref();
-    if (mSkipCutBuffer) {
+    if (mSkipCutBuffer != NULL) {
         mSkipCutBuffer->submit(info->mMediaBuffer);
     }
     *buffer = info->mMediaBuffer;
diff --git a/media/libstagefright/WVMExtractor.cpp b/media/libstagefright/WVMExtractor.cpp
index effe336..08d2ae2 100644
--- a/media/libstagefright/WVMExtractor.cpp
+++ b/media/libstagefright/WVMExtractor.cpp
@@ -127,6 +127,12 @@
     }
 }
 
+void WVMExtractor::setCryptoPluginMode(bool cryptoPluginMode) {
+    if (mImpl != NULL) {
+        mImpl->setCryptoPluginMode(cryptoPluginMode);
+    }
+}
+
 void WVMExtractor::setUID(uid_t uid) {
     if (mImpl != NULL) {
         mImpl->setUID(uid);
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index aa65a0b..bf7befd 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -108,6 +108,7 @@
             status = OK;
         }
     }
+    mIsFirst = true;
     return status;
 }
 
@@ -141,9 +142,9 @@
                 aacParams->nSampleRate = 44100;
                 aacParams->nFrameLength = 0;
             } else {
-                aacParams->nChannels = mStreamInfo->channelConfig;
-                aacParams->nSampleRate = mStreamInfo->aacSampleRate;
-                aacParams->nFrameLength = mStreamInfo->aacSamplesPerFrame;
+                aacParams->nChannels = mStreamInfo->numChannels;
+                aacParams->nSampleRate = mStreamInfo->sampleRate;
+                aacParams->nFrameLength = mStreamInfo->frameSize;
             }
 
             return OMX_ErrorNone;
@@ -174,7 +175,7 @@
                 pcmParams->nChannels = 1;
                 pcmParams->nSamplingRate = 44100;
             } else {
-                pcmParams->nChannels = mStreamInfo->channelConfig;
+                pcmParams->nChannels = mStreamInfo->numChannels;
                 pcmParams->nSamplingRate = mStreamInfo->sampleRate;
             }
 
@@ -184,6 +185,7 @@
         default:
             return SimpleSoftOMXComponent::internalGetParameter(index, params);
     }
+
 }
 
 OMX_ERRORTYPE SoftAAC2::internalSetParameter(
@@ -253,7 +255,6 @@
     UCHAR* inBuffer[FILEREAD_MAX_LAYERS];
     UINT inBufferLength[FILEREAD_MAX_LAYERS] = {0};
     UINT bytesValid[FILEREAD_MAX_LAYERS] = {0};
-    AAC_DECODER_ERROR decoderErr;
 
     List<BufferInfo *> &inQueue = getPortQueue(0);
     List<BufferInfo *> &outQueue = getPortQueue(1);
@@ -276,7 +277,6 @@
             notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
             return;
         }
-
         inQueue.erase(inQueue.begin());
         info->mOwnedByUs = false;
         notifyEmptyBufferDone(header);
@@ -298,7 +298,22 @@
             inInfo->mOwnedByUs = false;
             notifyEmptyBufferDone(inHeader);
 
-            outHeader->nFilledLen = 0;
+            // flush out the decoder's delayed data by calling DecodeFrame one more time, with
+            // the AACDEC_FLUSH flag set
+            INT_PCM *outBuffer =
+                    reinterpret_cast<INT_PCM *>(outHeader->pBuffer + outHeader->nOffset);
+            AAC_DECODER_ERROR decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
+                                                                  outBuffer,
+                                                                  outHeader->nAllocLen,
+                                                                  AACDEC_FLUSH);
+            if (decoderErr != AAC_DEC_OK) {
+                mSignalledError = true;
+                notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+                return;
+            }
+
+            outHeader->nFilledLen =
+                    mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;
             outHeader->nFlags = OMX_BUFFERFLAG_EOS;
 
             outQueue.erase(outQueue.begin());
@@ -342,23 +357,27 @@
             inBufferLength[0] = inHeader->nFilledLen;
         }
 
-
         // Fill and decode
         INT_PCM *outBuffer = reinterpret_cast<INT_PCM *>(outHeader->pBuffer + outHeader->nOffset);
         bytesValid[0] = inBufferLength[0];
 
         int flags = mInputDiscontinuity ? AACDEC_INTR : 0;
         int prevSampleRate = mStreamInfo->sampleRate;
-        decoderErr = aacDecoder_Fill(mAACDecoder,
-                                     inBuffer,
-                                     inBufferLength,
-                                     bytesValid);
+        int prevNumChannels = mStreamInfo->numChannels;
 
-        decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
-                                            outBuffer,
-                                            outHeader->nAllocLen,
-                                            flags);
+        AAC_DECODER_ERROR decoderErr = AAC_DEC_NOT_ENOUGH_BITS;
+        while (bytesValid[0] > 0 && decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
+            aacDecoder_Fill(mAACDecoder,
+                            inBuffer,
+                            inBufferLength,
+                            bytesValid);
 
+            decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
+                                                outBuffer,
+                                                outHeader->nAllocLen,
+                                                flags);
+
+        }
         mInputDiscontinuity = false;
 
         /*
@@ -375,8 +394,9 @@
          * Thus, we could not say for sure whether a stream is
          * AAC+/eAAC+ until the first data frame is decoded.
          */
-        if (decoderErr == AAC_DEC_OK && mInputBufferCount <= 2) {
-            if (mStreamInfo->sampleRate != prevSampleRate) {
+        if (mInputBufferCount <= 2) {
+            if (mStreamInfo->sampleRate != prevSampleRate ||
+                mStreamInfo->numChannels != prevNumChannels) {
                 // We're going to want to revisit this input buffer, but
                 // may have already advanced the offset. Undo that if
                 // necessary.
@@ -412,6 +432,12 @@
             // We'll only output data if we successfully decoded it or
             // we've previously decoded valid data, in the latter case
             // (decode failed) we'll output a silent frame.
+            if (mIsFirst) {
+                mIsFirst = false;
+                // the first decoded frame should be discarded to account for decoder delay
+                numOutBytes = 0;
+            }
+
             outHeader->nFilledLen = numOutBytes;
             outHeader->nFlags = 0;
 
@@ -447,6 +473,7 @@
         // Make sure that the next buffer output does not still
         // depend on fragments from the last one decoded.
         mInputDiscontinuity = true;
+        mIsFirst = true;
     }
 }
 
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h
index d93685c..e5a1e3e 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.h
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h
@@ -50,6 +50,7 @@
     HANDLE_AACDECODER mAACDecoder;
     CStreamInfo *mStreamInfo;
     bool mIsADTS;
+    bool mIsFirst;
     size_t mInputBufferCount;
     bool mSignalledError;
     bool mInputDiscontinuity;
diff --git a/media/libstagefright/codecs/aacenc/Android.mk b/media/libstagefright/codecs/aacenc/Android.mk
index 0ad3f6c..98e702e 100644
--- a/media/libstagefright/codecs/aacenc/Android.mk
+++ b/media/libstagefright/codecs/aacenc/Android.mk
@@ -2,7 +2,7 @@
 include $(CLEAR_VARS)
 include frameworks/av/media/libstagefright/codecs/common/Config.mk
 
-
+AAC_LIBRARY = fraunhofer
 
 LOCAL_SRC_FILES := basic_op/basicop2.c basic_op/oper_32b.c
 
@@ -90,24 +90,57 @@
 
 include $(CLEAR_VARS)
 
-LOCAL_SRC_FILES := \
-        SoftAACEncoder.cpp
+ifeq ($(AAC_LIBRARY), fraunhofer)
 
-LOCAL_C_INCLUDES := \
-	frameworks/av/media/libstagefright/include \
-	frameworks/av/media/libstagefright/codecs/common/include \
-	frameworks/native/include/media/openmax
+  include $(CLEAR_VARS)
 
-LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
+  LOCAL_SRC_FILES := \
+          SoftAACEncoder2.cpp
 
-LOCAL_STATIC_LIBRARIES := \
-        libstagefright_aacenc
+  LOCAL_C_INCLUDES := \
+          frameworks/av/media/libstagefright/include \
+          frameworks/native/include/media/openmax \
+          external/aac/libAACenc/include \
+          external/aac/libFDK/include \
+          external/aac/libMpegTPEnc/include \
+          external/aac/libSBRenc/include \
+          external/aac/libSYS/include
 
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright_omx libstagefright_foundation libutils \
-        libstagefright_enc_common
+  LOCAL_CFLAGS :=
 
-LOCAL_MODULE := libstagefright_soft_aacenc
-LOCAL_MODULE_TAGS := optional
+  LOCAL_STATIC_LIBRARIES := \
+          libAACenc libMpegTPEnc libSBRenc libFDK libSYS
 
-include $(BUILD_SHARED_LIBRARY)
+  LOCAL_SHARED_LIBRARIES := \
+          libstagefright_omx libstagefright_foundation libutils
+
+  LOCAL_MODULE := libstagefright_soft_aacenc
+  LOCAL_MODULE_TAGS := optional
+
+  include $(BUILD_SHARED_LIBRARY)
+
+else # visualon
+
+  LOCAL_SRC_FILES := \
+          SoftAACEncoder.cpp
+
+  LOCAL_C_INCLUDES := \
+          frameworks/av/media/libstagefright/include \
+          frameworks/av/media/libstagefright/codecs/common/include \
+          frameworks/native/include/media/openmax
+
+  LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
+
+  LOCAL_STATIC_LIBRARIES := \
+          libstagefright_aacenc
+
+  LOCAL_SHARED_LIBRARIES := \
+          libstagefright_omx libstagefright_foundation libutils \
+          libstagefright_enc_common
+
+  LOCAL_MODULE := libstagefright_soft_aacenc
+  LOCAL_MODULE_TAGS := optional
+
+  include $(BUILD_SHARED_LIBRARY)
+
+endif # $(AAC_LIBRARY)
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
new file mode 100644
index 0000000..7719435
--- /dev/null
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftAACEncoder2"
+#include <utils/Log.h>
+
+#include "SoftAACEncoder2.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+
+namespace android {
+
+template<class T>
+static void InitOMXParams(T *params) {
+    params->nSize = sizeof(T);
+    params->nVersion.s.nVersionMajor = 1;
+    params->nVersion.s.nVersionMinor = 0;
+    params->nVersion.s.nRevision = 0;
+    params->nVersion.s.nStep = 0;
+}
+
+SoftAACEncoder2::SoftAACEncoder2(
+        const char *name,
+        const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData,
+        OMX_COMPONENTTYPE **component)
+    : SimpleSoftOMXComponent(name, callbacks, appData, component),
+      mAACEncoder(NULL),
+      mNumChannels(1),
+      mSampleRate(44100),
+      mBitRate(0),
+      mAACProfile(OMX_AUDIO_AACObjectLC),
+      mSentCodecSpecificData(false),
+      mInputSize(0),
+      mInputFrame(NULL),
+      mInputTimeUs(-1ll),
+      mSawInputEOS(false),
+      mSignalledError(false) {
+    initPorts();
+    CHECK_EQ(initEncoder(), (status_t)OK);
+    setAudioParams();
+}
+
+SoftAACEncoder2::~SoftAACEncoder2() {
+    aacEncClose(&mAACEncoder);
+
+    delete[] mInputFrame;
+    mInputFrame = NULL;
+}
+
+void SoftAACEncoder2::initPorts() {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+
+    def.nPortIndex = 0;
+    def.eDir = OMX_DirInput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
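+    // Room for one frame of stereo 16-bit PCM
+    // (kNumSamplesPerFrame samples per channel).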
+    def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t) * 2;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 1;
+
+    def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+    addPort(def);
+
+    def.nPortIndex = 1;
+    def.eDir = OMX_DirOutput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
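+    // 8 kB output buffers; an encoded AAC frame is far smaller than this
+    // (on the order of 768 bytes per channel at most).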
+    def.nBufferSize = 8192;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 2;
+
+    def.format.audio.cMIMEType = const_cast<char *>("audio/aac");
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding = OMX_AUDIO_CodingAAC;
+
+    addPort(def);
+}
+
+status_t SoftAACEncoder2::initEncoder() {
+    if (AACENC_OK != aacEncOpen(&mAACEncoder, 0, 0)) {
+        ALOGE("Failed to init AAC encoder");
+        return UNKNOWN_ERROR;
+    }
+    return OK;
+}
+
+OMX_ERRORTYPE SoftAACEncoder2::internalGetParameter(
+        OMX_INDEXTYPE index, OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamAudioPortFormat:
+        {
+            OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+                (OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex > 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            formatParams->eEncoding =
+                (formatParams->nPortIndex == 0)
+                    ? OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAAC;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAac:
+        {
+            OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
+                (OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+
+            if (aacParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            aacParams->nBitRate = mBitRate;
+            aacParams->nAudioBandWidth = 0;
+            aacParams->nAACtools = 0;
+            aacParams->nAACERtools = 0;
+            aacParams->eAACProfile = (OMX_AUDIO_AACPROFILETYPE) mAACProfile;
+            aacParams->eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF;
+            aacParams->eChannelMode = OMX_AUDIO_ChannelModeStereo;
+
+            aacParams->nChannels = mNumChannels;
+            aacParams->nSampleRate = mSampleRate;
+            aacParams->nFrameLength = 0;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPcm:
+        {
+            OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+                (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+            if (pcmParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            pcmParams->eNumData = OMX_NumericalDataSigned;
+            pcmParams->eEndian = OMX_EndianBig;
+            pcmParams->bInterleaved = OMX_TRUE;
+            pcmParams->nBitPerSample = 16;
+            pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+            pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
+            pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
+
+            pcmParams->nChannels = mNumChannels;
+            pcmParams->nSamplingRate = mSampleRate;
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalGetParameter(index, params);
+    }
+}
+
+OMX_ERRORTYPE SoftAACEncoder2::internalSetParameter(
+        OMX_INDEXTYPE index, const OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamStandardComponentRole:
+        {
+            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+                (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+            if (strncmp((const char *)roleParams->cRole,
+                        "audio_encoder.aac",
+                        OMX_MAX_STRINGNAME_SIZE - 1)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPortFormat:
+        {
+            const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+                (const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex > 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            if ((formatParams->nPortIndex == 0
+                        && formatParams->eEncoding != OMX_AUDIO_CodingPCM)
+                || (formatParams->nPortIndex == 1
+                        && formatParams->eEncoding != OMX_AUDIO_CodingAAC)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAac:
+        {
+            OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
+                (OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+
+            if (aacParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            mBitRate = aacParams->nBitRate;
+            mNumChannels = aacParams->nChannels;
+            mSampleRate = aacParams->nSampleRate;
+            if (aacParams->eAACProfile != OMX_AUDIO_AACObjectNull) {
+                mAACProfile = aacParams->eAACProfile;
+            }
+
+            if (setAudioParams() != OK) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPcm:
+        {
+            OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+                (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+            if (pcmParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            mNumChannels = pcmParams->nChannels;
+            mSampleRate = pcmParams->nSamplingRate;
+            if (setAudioParams() != OK) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalSetParameter(index, params);
+    }
+}
+
+static CHANNEL_MODE getChannelMode(OMX_U32 nChannels) {
+    CHANNEL_MODE chMode = MODE_INVALID;
+    switch (nChannels) {
+        case 1: chMode = MODE_1; break;
+        case 2: chMode = MODE_2; break;
+        case 3: chMode = MODE_1_2; break;
+        case 4: chMode = MODE_1_2_1; break;
+        case 5: chMode = MODE_1_2_2; break;
+        case 6: chMode = MODE_1_2_2_1; break;
+        default: chMode = MODE_INVALID;
+    }
+    return chMode;
+}
+
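+// Map an OMX AAC profile to the corresponding FDK audio object type,
+// falling back to AAC-LC for anything unsupported.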
+static AUDIO_OBJECT_TYPE getAOTFromProfile(OMX_U32 profile) {
+    if (profile == OMX_AUDIO_AACObjectLC) {
+        return AOT_AAC_LC;
+    } else if (profile == OMX_AUDIO_AACObjectHE) {
+        return AOT_SBR;
+    } else if (profile == OMX_AUDIO_AACObjectELD) {
+        return AOT_ER_AAC_ELD;
+    } else {
+        ALOGW("Unsupported AAC profile - defaulting to AAC-LC");
+        return AOT_AAC_LC;
+    }
+}
+
+status_t SoftAACEncoder2::setAudioParams() {
+    // We call this whenever the sample rate, number of channels or bitrate
+    // changes in response to setParameter calls.
+
+    ALOGV("setAudioParams: %lu Hz, %lu channels, %lu bps",
+         mSampleRate, mNumChannels, mBitRate);
+
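+    // Push the current settings into the FDK encoder; the component
+    // initializes the encoder with them later in onQueueFilled().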
+    if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_AOT,
+            getAOTFromProfile(mAACProfile))) {
+        ALOGE("Failed to set AAC encoder parameters");
+        return UNKNOWN_ERROR;
+    }
+
+    if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_SAMPLERATE, mSampleRate)) {
+        ALOGE("Failed to set AAC encoder parameters");
+        return UNKNOWN_ERROR;
+    }
+    if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_BITRATE, mBitRate)) {
+        ALOGE("Failed to set AAC encoder parameters");
+        return UNKNOWN_ERROR;
+    }
+    if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_CHANNELMODE,
+            getChannelMode(mNumChannels))) {
+        ALOGE("Failed to set AAC encoder parameters");
+        return UNKNOWN_ERROR;
+    }
+    if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_TRANSMUX, TT_MP4_RAW)) {
+        ALOGE("Failed to set AAC encoder parameters");
+        return UNKNOWN_ERROR;
+    }
+
+    return OK;
+}
+
+void SoftAACEncoder2::onQueueFilled(OMX_U32 portIndex) {
+    if (mSignalledError) {
+        return;
+    }
+
+    List<BufferInfo *> &inQueue = getPortQueue(0);
+    List<BufferInfo *> &outQueue = getPortQueue(1);
+
+    if (!mSentCodecSpecificData) {
+        // The very first thing we want to output is the codec specific
+        // data. It does not require any input data but we will need an
+        // output buffer to store it in.
+
+        if (outQueue.empty()) {
+            return;
+        }
+
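+        // An aacEncEncode() call with all-NULL arguments (re)initializes the
+        // encoder using the parameters configured so far.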
+        if (AACENC_OK != aacEncEncode(mAACEncoder, NULL, NULL, NULL, NULL)) {
+            ALOGE("Unable to initialize encoder for profile / sample-rate / bit-rate / channels");
+            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+            mSignalledError = true;
+            return;
+        }
+
+        OMX_U32 actualBitRate = aacEncoder_GetParam(mAACEncoder, AACENC_BITRATE);
+        if (mBitRate != actualBitRate) {
+            ALOGW("Requested bitrate %lu unsupported, using %lu", mBitRate, actualBitRate);
+        }
+
+        AACENC_InfoStruct encInfo;
+        if (AACENC_OK != aacEncInfo(mAACEncoder, &encInfo)) {
+            ALOGE("Failed to get AAC encoder info");
+            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+            mSignalledError = true;
+            return;
+        }
+
+        BufferInfo *outInfo = *outQueue.begin();
+        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+        outHeader->nFilledLen = encInfo.confSize;
+        outHeader->nFlags = OMX_BUFFERFLAG_CODECCONFIG;
+
+        uint8_t *out = outHeader->pBuffer + outHeader->nOffset;
+        memcpy(out, encInfo.confBuf, encInfo.confSize);
+
+        outQueue.erase(outQueue.begin());
+        outInfo->mOwnedByUs = false;
+        notifyFillBufferDone(outHeader);
+
+        mSentCodecSpecificData = true;
+    }
+
+    size_t numBytesPerInputFrame =
+        mNumChannels * kNumSamplesPerFrame * sizeof(int16_t);
+
+    // Limit input size so we only get one ELD frame
+    if (mAACProfile == OMX_AUDIO_AACObjectELD && numBytesPerInputFrame > 512) {
+        numBytesPerInputFrame = 512;
+    }
+
+    for (;;) {
+        // We do the following until we run out of buffers.
+
+        while (mInputSize < numBytesPerInputFrame) {
+            // As long as there's still input data to be read we
+            // will drain "kNumSamplesPerFrame * mNumChannels" samples
+            // into the "mInputFrame" buffer and then encode those
+            // as a unit into an output buffer.
+
+            if (mSawInputEOS || inQueue.empty()) {
+                return;
+            }
+
+            BufferInfo *inInfo = *inQueue.begin();
+            OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+            const void *inData = inHeader->pBuffer + inHeader->nOffset;
+
+            size_t copy = numBytesPerInputFrame - mInputSize;
+            if (copy > inHeader->nFilledLen) {
+                copy = inHeader->nFilledLen;
+            }
+
+            if (mInputFrame == NULL) {
+                mInputFrame = new int16_t[numBytesPerInputFrame / sizeof(int16_t)];
+            }
+
+            if (mInputSize == 0) {
+                mInputTimeUs = inHeader->nTimeStamp;
+            }
+
+            memcpy((uint8_t *)mInputFrame + mInputSize, inData, copy);
+            mInputSize += copy;
+
+            inHeader->nOffset += copy;
+            inHeader->nFilledLen -= copy;
+
+            // "Time" on the input buffer has in effect advanced by the
+            // duration of the samples we just consumed by advancing nOffset.
+            inHeader->nTimeStamp +=
+                (copy * 1000000ll / mSampleRate)
+                    / (mNumChannels * sizeof(int16_t));
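+            // E.g. for 44.1 kHz stereo 16-bit input, copying 4096 bytes
+            // advances the timestamp by 4096 * 1e6 / 44100 / 4 ~= 23220 us.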
+
+            if (inHeader->nFilledLen == 0) {
+                if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                    mSawInputEOS = true;
+
+                    // Pad any remaining data with zeroes.
+                    memset((uint8_t *)mInputFrame + mInputSize,
+                           0,
+                           numBytesPerInputFrame - mInputSize);
+
+                    mInputSize = numBytesPerInputFrame;
+                }
+
+                inQueue.erase(inQueue.begin());
+                inInfo->mOwnedByUs = false;
+                notifyEmptyBufferDone(inHeader);
+
+                inData = NULL;
+                inHeader = NULL;
+                inInfo = NULL;
+            }
+        }
+
+        // At this point we have all the input data necessary to encode a
+        // single frame; all we need now is an output buffer to store the
+        // result in.
+
+        if (outQueue.empty()) {
+            return;
+        }
+
+        BufferInfo *outInfo = *outQueue.begin();
+        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+        uint8_t *outPtr = (uint8_t *)outHeader->pBuffer + outHeader->nOffset;
+        size_t outAvailable = outHeader->nAllocLen - outHeader->nOffset;
+
+        AACENC_InArgs inargs;
+        AACENC_OutArgs outargs;
+        memset(&inargs, 0, sizeof(inargs));
+        memset(&outargs, 0, sizeof(outargs));
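+        // numInSamples counts interleaved 16-bit samples across all channels.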
+        inargs.numInSamples = numBytesPerInputFrame / sizeof(int16_t);
+
+        void* inBuffer[]        = { (unsigned char *)mInputFrame };
+        INT   inBufferIds[]     = { IN_AUDIO_DATA };
+        INT   inBufferSize[]    = { numBytesPerInputFrame };
+        INT   inBufferElSize[]  = { sizeof(int16_t) };
+
+        AACENC_BufDesc inBufDesc;
+        inBufDesc.numBufs           = sizeof(inBuffer) / sizeof(void*);
+        inBufDesc.bufs              = (void**)&inBuffer;
+        inBufDesc.bufferIdentifiers = inBufferIds;
+        inBufDesc.bufSizes          = inBufferSize;
+        inBufDesc.bufElSizes        = inBufferElSize;
+
+        void* outBuffer[]       = { outPtr };
+        INT   outBufferIds[]    = { OUT_BITSTREAM_DATA };
+        INT   outBufferSize[]   = { 0 };
+        INT   outBufferElSize[] = { sizeof(UCHAR) };
+
+        AACENC_BufDesc outBufDesc;
+        outBufDesc.numBufs           = sizeof(outBuffer) / sizeof(void*);
+        outBufDesc.bufs              = (void**)&outBuffer;
+        outBufDesc.bufferIdentifiers = outBufferIds;
+        outBufDesc.bufSizes          = outBufferSize;
+        outBufDesc.bufElSizes        = outBufferElSize;
+
+        // Encode the mInputFrame, which is treated as a modulo buffer
+        AACENC_ERROR encoderErr = AACENC_OK;
+        size_t nOutputBytes = 0;
+
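+        // Keep encoding until everything in mInputFrame has been consumed;
+        // each pass reports how many input samples it used and how many
+        // output bytes it produced.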
+        do {
+            memset(&outargs, 0, sizeof(outargs));
+
+            outBuffer[0] = outPtr;
+            outBufferSize[0] = outAvailable - nOutputBytes;
+
+            encoderErr = aacEncEncode(mAACEncoder,
+                                      &inBufDesc,
+                                      &outBufDesc,
+                                      &inargs,
+                                      &outargs);
+
+            if (encoderErr == AACENC_OK) {
+                outPtr += outargs.numOutBytes;
+                nOutputBytes += outargs.numOutBytes;
+
+                if (outargs.numInSamples > 0) {
+                    int numRemainingSamples = inargs.numInSamples - outargs.numInSamples;
+                    if (numRemainingSamples > 0) {
+                        memmove(mInputFrame,
+                                &mInputFrame[outargs.numInSamples],
+                                sizeof(int16_t) * numRemainingSamples);
+                    }
+                    inargs.numInSamples -= outargs.numInSamples;
+                }
+            }
+        } while (encoderErr == AACENC_OK && inargs.numInSamples > 0);
+
+        outHeader->nFilledLen = nOutputBytes;
+
+        outHeader->nFlags = OMX_BUFFERFLAG_ENDOFFRAME;
+
+        if (mSawInputEOS) {
+            // We also tag this output buffer with EOS if it corresponds
+            // to the final input buffer.
+            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+        }
+
+        outHeader->nTimeStamp = mInputTimeUs;
+
+#if 0
+        ALOGI("sending %d bytes of data (time = %lld us, flags = 0x%08lx)",
+              nOutputBytes, mInputTimeUs, outHeader->nFlags);
+
+        hexdump(outHeader->pBuffer + outHeader->nOffset, outHeader->nFilledLen);
+#endif
+
+        outQueue.erase(outQueue.begin());
+        outInfo->mOwnedByUs = false;
+        notifyFillBufferDone(outHeader);
+
+        outHeader = NULL;
+        outInfo = NULL;
+
+        mInputSize = 0;
+    }
+}
+
+}  // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+        const char *name, const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+    return new android::SoftAACEncoder2(name, callbacks, appData, component);
+}
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
new file mode 100644
index 0000000..2603f4f
--- /dev/null
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_AAC_ENCODER_2_H_
+
+#define SOFT_AAC_ENCODER_2_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+#include "aacenc_lib.h"
+
+namespace android {
+
+struct SoftAACEncoder2 : public SimpleSoftOMXComponent {
+    SoftAACEncoder2(
+            const char *name,
+            const OMX_CALLBACKTYPE *callbacks,
+            OMX_PTR appData,
+            OMX_COMPONENTTYPE **component);
+
+protected:
+    virtual ~SoftAACEncoder2();
+
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR params);
+
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
+    virtual void onQueueFilled(OMX_U32 portIndex);
+
+private:
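+    // One AAC frame covers 1024 PCM samples per channel.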
+    enum {
+        kNumBuffers             = 4,
+        kNumSamplesPerFrame     = 1024
+    };
+
+    HANDLE_AACENCODER mAACEncoder;
+
+    OMX_U32 mNumChannels;
+    OMX_U32 mSampleRate;
+    OMX_U32 mBitRate;
+    OMX_U32 mAACProfile;
+
+    bool mSentCodecSpecificData;
+    size_t mInputSize;
+    int16_t *mInputFrame;
+    int64_t mInputTimeUs;
+
+    bool mSawInputEOS;
+
+    bool mSignalledError;
+
+    void initPorts();
+    status_t initEncoder();
+
+    status_t setAudioParams();
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftAACEncoder2);
+};
+
+}  // namespace android
+
+#endif  // SOFT_AAC_ENCODER_2_H_
diff --git a/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp
deleted file mode 100644
index 7533f07..0000000
--- a/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "AVCEncoder"
-#include <utils/Log.h>
-
-#include "AVCEncoder.h"
-
-#include "avcenc_api.h"
-#include "avcenc_int.h"
-#include "OMX_Video.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-
-namespace android {
-
-static status_t ConvertOmxAvcProfileToAvcSpecProfile(
-        int32_t omxProfile, AVCProfile* pvProfile) {
-    ALOGV("ConvertOmxAvcProfileToAvcSpecProfile: %d", omxProfile);
-    switch (omxProfile) {
-        case OMX_VIDEO_AVCProfileBaseline:
-            *pvProfile = AVC_BASELINE;
-            return OK;
-        default:
-            ALOGE("Unsupported omx profile: %d", omxProfile);
-    }
-    return BAD_VALUE;
-}
-
-static status_t ConvertOmxAvcLevelToAvcSpecLevel(
-        int32_t omxLevel, AVCLevel *pvLevel) {
-    ALOGV("ConvertOmxAvcLevelToAvcSpecLevel: %d", omxLevel);
-    AVCLevel level = AVC_LEVEL5_1;
-    switch (omxLevel) {
-        case OMX_VIDEO_AVCLevel1:
-            level = AVC_LEVEL1_B;
-            break;
-        case OMX_VIDEO_AVCLevel1b:
-            level = AVC_LEVEL1;
-            break;
-        case OMX_VIDEO_AVCLevel11:
-            level = AVC_LEVEL1_1;
-            break;
-        case OMX_VIDEO_AVCLevel12:
-            level = AVC_LEVEL1_2;
-            break;
-        case OMX_VIDEO_AVCLevel13:
-            level = AVC_LEVEL1_3;
-            break;
-        case OMX_VIDEO_AVCLevel2:
-            level = AVC_LEVEL2;
-            break;
-        case OMX_VIDEO_AVCLevel21:
-            level = AVC_LEVEL2_1;
-            break;
-        case OMX_VIDEO_AVCLevel22:
-            level = AVC_LEVEL2_2;
-            break;
-        case OMX_VIDEO_AVCLevel3:
-            level = AVC_LEVEL3;
-            break;
-        case OMX_VIDEO_AVCLevel31:
-            level = AVC_LEVEL3_1;
-            break;
-        case OMX_VIDEO_AVCLevel32:
-            level = AVC_LEVEL3_2;
-            break;
-        case OMX_VIDEO_AVCLevel4:
-            level = AVC_LEVEL4;
-            break;
-        case OMX_VIDEO_AVCLevel41:
-            level = AVC_LEVEL4_1;
-            break;
-        case OMX_VIDEO_AVCLevel42:
-            level = AVC_LEVEL4_2;
-            break;
-        case OMX_VIDEO_AVCLevel5:
-            level = AVC_LEVEL5;
-            break;
-        case OMX_VIDEO_AVCLevel51:
-            level = AVC_LEVEL5_1;
-            break;
-        default:
-            ALOGE("Unknown omx level: %d", omxLevel);
-            return BAD_VALUE;
-    }
-    *pvLevel = level;
-    return OK;
-}
-
-inline static void ConvertYUV420SemiPlanarToYUV420Planar(
-        uint8_t *inyuv, uint8_t* outyuv,
-        int32_t width, int32_t height) {
-
-    int32_t outYsize = width * height;
-    uint32_t *outy =  (uint32_t *) outyuv;
-    uint16_t *outcb = (uint16_t *) (outyuv + outYsize);
-    uint16_t *outcr = (uint16_t *) (outyuv + outYsize + (outYsize >> 2));
-
-    /* Y copying */
-    memcpy(outy, inyuv, outYsize);
-
-    /* U & V copying */
-    uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
-    for (int32_t i = height >> 1; i > 0; --i) {
-        for (int32_t j = width >> 2; j > 0; --j) {
-            uint32_t temp = *inyuv_4++;
-            uint32_t tempU = temp & 0xFF;
-            tempU = tempU | ((temp >> 8) & 0xFF00);
-
-            uint32_t tempV = (temp >> 8) & 0xFF;
-            tempV = tempV | ((temp >> 16) & 0xFF00);
-
-            // Flip U and V
-            *outcb++ = tempV;
-            *outcr++ = tempU;
-        }
-    }
-}
-
-static int32_t MallocWrapper(
-        void *userData, int32_t size, int32_t attrs) {
-    return reinterpret_cast<int32_t>(malloc(size));
-}
-
-static void FreeWrapper(void *userData, int32_t ptr) {
-    free(reinterpret_cast<void *>(ptr));
-}
-
-static int32_t DpbAllocWrapper(void *userData,
-        unsigned int sizeInMbs, unsigned int numBuffers) {
-    AVCEncoder *encoder = static_cast<AVCEncoder *>(userData);
-    CHECK(encoder != NULL);
-    return encoder->allocOutputBuffers(sizeInMbs, numBuffers);
-}
-
-static int32_t BindFrameWrapper(
-        void *userData, int32_t index, uint8_t **yuv) {
-    AVCEncoder *encoder = static_cast<AVCEncoder *>(userData);
-    CHECK(encoder != NULL);
-    return encoder->bindOutputBuffer(index, yuv);
-}
-
-static void UnbindFrameWrapper(void *userData, int32_t index) {
-    AVCEncoder *encoder = static_cast<AVCEncoder *>(userData);
-    CHECK(encoder != NULL);
-    return encoder->unbindOutputBuffer(index);
-}
-
-AVCEncoder::AVCEncoder(
-        const sp<MediaSource>& source,
-        const sp<MetaData>& meta)
-    : mSource(source),
-      mMeta(meta),
-      mNumInputFrames(-1),
-      mPrevTimestampUs(-1),
-      mStarted(false),
-      mInputBuffer(NULL),
-      mInputFrameData(NULL),
-      mGroup(NULL) {
-
-    ALOGI("Construct software AVCEncoder");
-
-    mHandle = new tagAVCHandle;
-    memset(mHandle, 0, sizeof(tagAVCHandle));
-    mHandle->AVCObject = NULL;
-    mHandle->userData = this;
-    mHandle->CBAVC_DPBAlloc = DpbAllocWrapper;
-    mHandle->CBAVC_FrameBind = BindFrameWrapper;
-    mHandle->CBAVC_FrameUnbind = UnbindFrameWrapper;
-    mHandle->CBAVC_Malloc = MallocWrapper;
-    mHandle->CBAVC_Free = FreeWrapper;
-
-    mInitCheck = initCheck(meta);
-}
-
-AVCEncoder::~AVCEncoder() {
-    ALOGV("Destruct software AVCEncoder");
-    if (mStarted) {
-        stop();
-    }
-
-    delete mEncParams;
-    delete mHandle;
-}
-
-status_t AVCEncoder::initCheck(const sp<MetaData>& meta) {
-    ALOGV("initCheck");
-    CHECK(meta->findInt32(kKeyWidth, &mVideoWidth));
-    CHECK(meta->findInt32(kKeyHeight, &mVideoHeight));
-    CHECK(meta->findInt32(kKeyFrameRate, &mVideoFrameRate));
-    CHECK(meta->findInt32(kKeyBitRate, &mVideoBitRate));
-
-    // XXX: Add more color format support
-    CHECK(meta->findInt32(kKeyColorFormat, &mVideoColorFormat));
-    if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
-        if (mVideoColorFormat != OMX_COLOR_FormatYUV420SemiPlanar) {
-            ALOGE("Color format %d is not supported", mVideoColorFormat);
-            return BAD_VALUE;
-        }
-        // Allocate spare buffer only when color conversion is needed.
-        // Assume the color format is OMX_COLOR_FormatYUV420SemiPlanar.
-        mInputFrameData =
-            (uint8_t *) malloc((mVideoWidth * mVideoHeight * 3 ) >> 1);
-        CHECK(mInputFrameData);
-    }
-
-    // XXX: Remove this restriction
-    if (mVideoWidth % 16 != 0 || mVideoHeight % 16 != 0) {
-        ALOGE("Video frame size %dx%d must be a multiple of 16",
-            mVideoWidth, mVideoHeight);
-        return BAD_VALUE;
-    }
-
-    mEncParams = new tagAVCEncParam;
-    memset(mEncParams, 0, sizeof(mEncParams));
-    mEncParams->width = mVideoWidth;
-    mEncParams->height = mVideoHeight;
-    mEncParams->frame_rate = 1000 * mVideoFrameRate;  // In frames/ms!
-    mEncParams->rate_control = AVC_ON;
-    mEncParams->bitrate = mVideoBitRate;
-    mEncParams->initQP = 0;
-    mEncParams->init_CBP_removal_delay = 1600;
-    mEncParams->CPB_size = (uint32_t) (mVideoBitRate >> 1);
-
-    mEncParams->intramb_refresh = 0;
-    mEncParams->auto_scd = AVC_ON;
-    mEncParams->out_of_band_param_set = AVC_ON;
-    mEncParams->poc_type = 2;
-    mEncParams->log2_max_poc_lsb_minus_4 = 12;
-    mEncParams->delta_poc_zero_flag = 0;
-    mEncParams->offset_poc_non_ref = 0;
-    mEncParams->offset_top_bottom = 0;
-    mEncParams->num_ref_in_cycle = 0;
-    mEncParams->offset_poc_ref = NULL;
-
-    mEncParams->num_ref_frame = 1;
-    mEncParams->num_slice_group = 1;
-    mEncParams->fmo_type = 0;
-
-    mEncParams->db_filter = AVC_ON;
-    mEncParams->disable_db_idc = 0;
-
-    mEncParams->alpha_offset = 0;
-    mEncParams->beta_offset = 0;
-    mEncParams->constrained_intra_pred = AVC_OFF;
-
-    mEncParams->data_par = AVC_OFF;
-    mEncParams->fullsearch = AVC_OFF;
-    mEncParams->search_range = 16;
-    mEncParams->sub_pel = AVC_OFF;
-    mEncParams->submb_pred = AVC_OFF;
-    mEncParams->rdopt_mode = AVC_OFF;
-    mEncParams->bidir_pred = AVC_OFF;
-    int32_t nMacroBlocks = ((((mVideoWidth + 15) >> 4) << 4) *
-            (((mVideoHeight + 15) >> 4) << 4)) >> 8;
-    uint32_t *sliceGroup = (uint32_t *) malloc(sizeof(uint32_t) * nMacroBlocks);
-    for (int ii = 0, idx = 0; ii < nMacroBlocks; ++ii) {
-        sliceGroup[ii] = idx++;
-        if (idx >= mEncParams->num_slice_group) {
-            idx = 0;
-        }
-    }
-    mEncParams->slice_group = sliceGroup;
-
-    mEncParams->use_overrun_buffer = AVC_OFF;
-
-    // Set IDR frame refresh interval
-    int32_t iFramesIntervalSec;
-    CHECK(meta->findInt32(kKeyIFramesInterval, &iFramesIntervalSec));
-    if (iFramesIntervalSec < 0) {
-        mEncParams->idr_period = -1;
-    } else if (iFramesIntervalSec == 0) {
-        mEncParams->idr_period = 1;  // All I frames
-    } else {
-        mEncParams->idr_period =
-            (iFramesIntervalSec * mVideoFrameRate);
-    }
-    ALOGV("idr_period: %d, I-frames interval: %d seconds, and frame rate: %d",
-        mEncParams->idr_period, iFramesIntervalSec, mVideoFrameRate);
-
-    // Set profile and level
-    // If profile and level setting is not correct, failure
-    // is reported when the encoder is initialized.
-    mEncParams->profile = AVC_BASELINE;
-    mEncParams->level = AVC_LEVEL3_2;
-    int32_t profile, level;
-    if (meta->findInt32(kKeyVideoProfile, &profile)) {
-        if (OK != ConvertOmxAvcProfileToAvcSpecProfile(
-                        profile, &mEncParams->profile)) {
-            return BAD_VALUE;
-        }
-    }
-    if (meta->findInt32(kKeyVideoLevel, &level)) {
-        if (OK != ConvertOmxAvcLevelToAvcSpecLevel(
-                        level, &mEncParams->level)) {
-            return BAD_VALUE;
-        }
-    }
-
-
-    mFormat = new MetaData;
-    mFormat->setInt32(kKeyWidth, mVideoWidth);
-    mFormat->setInt32(kKeyHeight, mVideoHeight);
-    mFormat->setInt32(kKeyBitRate, mVideoBitRate);
-    mFormat->setInt32(kKeyFrameRate, mVideoFrameRate);
-    mFormat->setInt32(kKeyColorFormat, mVideoColorFormat);
-    mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
-    mFormat->setCString(kKeyDecoderComponent, "AVCEncoder");
-    return OK;
-}
-
-status_t AVCEncoder::start(MetaData *params) {
-    ALOGV("start");
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    if (mStarted) {
-        ALOGW("Call start() when encoder already started");
-        return OK;
-    }
-
-    AVCEnc_Status err;
-    err = PVAVCEncInitialize(mHandle, mEncParams, NULL, NULL);
-    if (err != AVCENC_SUCCESS) {
-        ALOGE("Failed to initialize the encoder: %d", err);
-        return UNKNOWN_ERROR;
-    }
-
-    mGroup = new MediaBufferGroup();
-    int32_t maxSize;
-    if (AVCENC_SUCCESS !=
-        PVAVCEncGetMaxOutputBufferSize(mHandle, &maxSize)) {
-        maxSize = 31584;  // Magic #
-    }
-    mGroup->add_buffer(new MediaBuffer(maxSize));
-
-    mSource->start(params);
-    mNumInputFrames = -2;  // 1st two buffers contain SPS and PPS
-    mStarted = true;
-    mSpsPpsHeaderReceived = false;
-    mReadyForNextFrame = true;
-    mIsIDRFrame = 0;
-
-    return OK;
-}
-
-status_t AVCEncoder::stop() {
-    ALOGV("stop");
-    if (!mStarted) {
-        ALOGW("Call stop() when encoder has not started");
-        return OK;
-    }
-
-    if (mInputBuffer) {
-        mInputBuffer->release();
-        mInputBuffer = NULL;
-    }
-
-    if (mGroup) {
-        delete mGroup;
-        mGroup = NULL;
-    }
-
-    if (mInputFrameData) {
-        delete mInputFrameData;
-        mInputFrameData = NULL;
-    }
-
-    PVAVCCleanUpEncoder(mHandle);
-    mSource->stop();
-    releaseOutputBuffers();
-    mStarted = false;
-
-    return OK;
-}
-
-void AVCEncoder::releaseOutputBuffers() {
-    ALOGV("releaseOutputBuffers");
-    for (size_t i = 0; i < mOutputBuffers.size(); ++i) {
-        MediaBuffer *buffer = mOutputBuffers.editItemAt(i);
-        buffer->setObserver(NULL);
-        buffer->release();
-    }
-    mOutputBuffers.clear();
-}
-
-sp<MetaData> AVCEncoder::getFormat() {
-    ALOGV("getFormat");
-    return mFormat;
-}
-
-status_t AVCEncoder::read(
-        MediaBuffer **out, const ReadOptions *options) {
-
-    CHECK(!options);
-    *out = NULL;
-
-    MediaBuffer *outputBuffer;
-    CHECK_EQ((status_t)OK, mGroup->acquire_buffer(&outputBuffer));
-    uint8_t *outPtr = (uint8_t *) outputBuffer->data();
-    uint32_t dataLength = outputBuffer->size();
-
-    if (!mSpsPpsHeaderReceived && mNumInputFrames < 0) {
-        // 4 bytes are reserved for holding the start code 0x00000001
-        // of the sequence parameter set at the beginning.
-        outPtr += 4;
-        dataLength -= 4;
-    }
-
-    int32_t type;
-    AVCEnc_Status encoderStatus = AVCENC_SUCCESS;
-
-    // Combine SPS and PPS and place them in the very first output buffer
-    // SPS and PPS are separated by start code 0x00000001
-    // Assume that we have exactly one SPS and exactly one PPS.
-    while (!mSpsPpsHeaderReceived && mNumInputFrames <= 0) {
-        encoderStatus = PVAVCEncodeNAL(mHandle, outPtr, &dataLength, &type);
-        if (encoderStatus == AVCENC_WRONG_STATE) {
-            mSpsPpsHeaderReceived = true;
-            CHECK_EQ(0, mNumInputFrames);  // 1st video frame is 0
-        } else {
-            switch (type) {
-                case AVC_NALTYPE_SPS:
-                    ++mNumInputFrames;
-                    memcpy((uint8_t *)outputBuffer->data(), "\x00\x00\x00\x01", 4);
-                    outputBuffer->set_range(0, dataLength + 4);
-                    outPtr += (dataLength + 4);  // 4 bytes for next start code
-                    dataLength = outputBuffer->size() -
-                            (outputBuffer->range_length() + 4);
-                    break;
-                case AVC_NALTYPE_PPS:
-                    ++mNumInputFrames;
-                    memcpy(((uint8_t *) outputBuffer->data()) +
-                            outputBuffer->range_length(),
-                            "\x00\x00\x00\x01", 4);
-                    outputBuffer->set_range(0,
-                            dataLength + outputBuffer->range_length() + 4);
-                    outputBuffer->meta_data()->setInt32(kKeyIsCodecConfig, 1);
-                    outputBuffer->meta_data()->setInt64(kKeyTime, 0);
-                    *out = outputBuffer;
-                    return OK;
-                default:
-                    ALOGE("Nal type (%d) other than SPS/PPS is unexpected", type);
-                    return UNKNOWN_ERROR;
-            }
-        }
-    }
-
-    // Get next input video frame
-    if (mReadyForNextFrame) {
-        if (mInputBuffer) {
-            mInputBuffer->release();
-            mInputBuffer = NULL;
-        }
-        status_t err = mSource->read(&mInputBuffer, options);
-        if (err != OK) {
-            if (err != ERROR_END_OF_STREAM) {
-                ALOGE("Failed to read input video frame: %d", err);
-            }
-            outputBuffer->release();
-            return err;
-        }
-
-        if (mInputBuffer->size() - ((mVideoWidth * mVideoHeight * 3) >> 1) != 0) {
-            outputBuffer->release();
-            mInputBuffer->release();
-            mInputBuffer = NULL;
-            return UNKNOWN_ERROR;
-        }
-
-        int64_t timeUs;
-        CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
-        outputBuffer->meta_data()->setInt64(kKeyTime, timeUs);
-
-        // When the timestamp of the current sample is the same as
-        // that of the previous sample, the encoding of the sample
-        // is bypassed, and the output length is set to 0.
-        if (mNumInputFrames >= 1 && mPrevTimestampUs == timeUs) {
-            // Frame arrives too late
-            mInputBuffer->release();
-            mInputBuffer = NULL;
-            outputBuffer->set_range(0, 0);
-            *out = outputBuffer;
-            return OK;
-        }
-
-        // Don't accept out-of-order samples
-        CHECK(mPrevTimestampUs < timeUs);
-        mPrevTimestampUs = timeUs;
-
-        AVCFrameIO videoInput;
-        memset(&videoInput, 0, sizeof(videoInput));
-        videoInput.height = ((mVideoHeight  + 15) >> 4) << 4;
-        videoInput.pitch = ((mVideoWidth + 15) >> 4) << 4;
-        videoInput.coding_timestamp = (timeUs + 500) / 1000;  // in ms
-        uint8_t *inputData = (uint8_t *) mInputBuffer->data();
-
-        if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
-            CHECK(mInputFrameData);
-            CHECK(mVideoColorFormat == OMX_COLOR_FormatYUV420SemiPlanar);
-            ConvertYUV420SemiPlanarToYUV420Planar(
-                inputData, mInputFrameData, mVideoWidth, mVideoHeight);
-            inputData = mInputFrameData;
-        }
-        CHECK(inputData != NULL);
-        videoInput.YCbCr[0] = inputData;
-        videoInput.YCbCr[1] = videoInput.YCbCr[0] + videoInput.height * videoInput.pitch;
-        videoInput.YCbCr[2] = videoInput.YCbCr[1] +
-            ((videoInput.height * videoInput.pitch) >> 2);
-        videoInput.disp_order = mNumInputFrames;
-
-        encoderStatus = PVAVCEncSetInput(mHandle, &videoInput);
-        if (encoderStatus == AVCENC_SUCCESS ||
-            encoderStatus == AVCENC_NEW_IDR) {
-            mReadyForNextFrame = false;
-            ++mNumInputFrames;
-            if (encoderStatus == AVCENC_NEW_IDR) {
-                mIsIDRFrame = 1;
-            }
-        } else {
-            if (encoderStatus < AVCENC_SUCCESS) {
-                outputBuffer->release();
-                return UNKNOWN_ERROR;
-            } else {
-                outputBuffer->set_range(0, 0);
-                *out = outputBuffer;
-                return OK;
-            }
-        }
-    }
-
-    // Encode an input video frame
-    CHECK(encoderStatus == AVCENC_SUCCESS ||
-          encoderStatus == AVCENC_NEW_IDR);
-    dataLength = outputBuffer->size();  // Reset the output buffer length
-    encoderStatus = PVAVCEncodeNAL(mHandle, outPtr, &dataLength, &type);
-    if (encoderStatus == AVCENC_SUCCESS) {
-        outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, mIsIDRFrame);
-        CHECK(NULL == PVAVCEncGetOverrunBuffer(mHandle));
-    } else if (encoderStatus == AVCENC_PICTURE_READY) {
-        CHECK(NULL == PVAVCEncGetOverrunBuffer(mHandle));
-        if (mIsIDRFrame) {
-            outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, mIsIDRFrame);
-            mIsIDRFrame = 0;
-            ALOGV("Output an IDR frame");
-        }
-        mReadyForNextFrame = true;
-        AVCFrameIO recon;
-        if (PVAVCEncGetRecon(mHandle, &recon) == AVCENC_SUCCESS) {
-            PVAVCEncReleaseRecon(mHandle, &recon);
-        }
-    } else {
-        dataLength = 0;
-        mReadyForNextFrame = true;
-    }
-    if (encoderStatus < AVCENC_SUCCESS) {
-        outputBuffer->release();
-        return UNKNOWN_ERROR;
-    }
-
-    outputBuffer->set_range(0, dataLength);
-    *out = outputBuffer;
-    return OK;
-}
-
-int32_t AVCEncoder::allocOutputBuffers(
-        unsigned int sizeInMbs, unsigned int numBuffers) {
-    CHECK(mOutputBuffers.isEmpty());
-    size_t frameSize = (sizeInMbs << 7) * 3;
-    for (unsigned int i = 0; i <  numBuffers; ++i) {
-        MediaBuffer *buffer = new MediaBuffer(frameSize);
-        buffer->setObserver(this);
-        mOutputBuffers.push(buffer);
-    }
-
-    return 1;
-}
-
-void AVCEncoder::unbindOutputBuffer(int32_t index) {
-    CHECK(index >= 0);
-}
-
-int32_t AVCEncoder::bindOutputBuffer(int32_t index, uint8_t **yuv) {
-    CHECK(index >= 0);
-    CHECK(index < (int32_t) mOutputBuffers.size());
-    int64_t timeUs;
-    CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
-    mOutputBuffers[index]->meta_data()->setInt64(kKeyTime, timeUs);
-
-    *yuv = (uint8_t *) mOutputBuffers[index]->data();
-
-    return 1;
-}
-
-void AVCEncoder::signalBufferReturned(MediaBuffer *buffer) {
-}
-
-}  // namespace android
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
index ee31ab2..48923cf 100644
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -2,8 +2,6 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES := \
-    AVCEncoder.cpp \
-    SoftAVCEncoder.cpp \
     src/avcenc_api.cpp \
     src/bitstream_io.cpp \
     src/block.cpp \
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.mk b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
index 7cbb38f..484180d 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
@@ -2,7 +2,6 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES := \
-    M4vH263Encoder.cpp \
     src/bitstream_io.cpp \
     src/combined_encode.cpp \
     src/datapart_encode.cpp \
@@ -35,3 +34,39 @@
     $(TOP)/frameworks/native/include/media/openmax
 
 include $(BUILD_STATIC_LIBRARY)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+        SoftMPEG4Encoder.cpp
+
+LOCAL_C_INCLUDES := \
+        frameworks/av/media/libstagefright/include \
+        frameworks/native/include/media/openmax \
+        $(LOCAL_PATH)/src \
+        $(LOCAL_PATH)/include \
+        $(LOCAL_PATH)/../common/include \
+        $(LOCAL_PATH)/../common
+
+LOCAL_CFLAGS := \
+    -DBX_RC \
+    -DOSCL_IMPORT_REF= -DOSCL_UNUSED_ARG= -DOSCL_EXPORT_REF=
+
+
+LOCAL_STATIC_LIBRARIES := \
+        libstagefright_m4vh263enc
+
+LOCAL_SHARED_LIBRARIES := \
+        libstagefright \
+        libstagefright_enc_common \
+        libstagefright_foundation \
+        libstagefright_omx \
+        libutils
+
+LOCAL_MODULE := libstagefright_soft_mpeg4enc
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp
deleted file mode 100644
index 20b0f8d..0000000
--- a/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "M4vH263Encoder"
-#include <utils/Log.h>
-
-#include "M4vH263Encoder.h"
-
-#include "mp4enc_api.h"
-#include "OMX_Video.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-
-namespace android {
-
-static status_t ConvertOmxProfileLevel(
-        MP4EncodingMode mode,
-        int32_t omxProfile,
-        int32_t omxLevel,
-        ProfileLevelType* pvProfileLevel) {
-    ALOGV("ConvertOmxProfileLevel: %d/%d/%d", mode, omxProfile, omxLevel);
-    ProfileLevelType profileLevel;
-    if (mode == H263_MODE) {
-        switch (omxProfile) {
-            case OMX_VIDEO_H263ProfileBaseline:
-                if (omxLevel > OMX_VIDEO_H263Level45) {
-                    ALOGE("Unsupported level (%d) for H263", omxLevel);
-                    return BAD_VALUE;
-                } else {
-                    ALOGW("PV does not support level configuration for H263");
-                    profileLevel = CORE_PROFILE_LEVEL2;
-                    break;
-                }
-                break;
-            default:
-                ALOGE("Unsupported profile (%d) for H263", omxProfile);
-                return BAD_VALUE;
-        }
-    } else {  // MPEG4
-        switch (omxProfile) {
-            case OMX_VIDEO_MPEG4ProfileSimple:
-                switch (omxLevel) {
-                    case OMX_VIDEO_MPEG4Level0b:
-                        profileLevel = SIMPLE_PROFILE_LEVEL0;
-                        break;
-                    case OMX_VIDEO_MPEG4Level1:
-                        profileLevel = SIMPLE_PROFILE_LEVEL1;
-                        break;
-                    case OMX_VIDEO_MPEG4Level2:
-                        profileLevel = SIMPLE_PROFILE_LEVEL2;
-                        break;
-                    case OMX_VIDEO_MPEG4Level3:
-                        profileLevel = SIMPLE_PROFILE_LEVEL3;
-                        break;
-                    default:
-                        ALOGE("Unsupported level (%d) for MPEG4 simple profile",
-                            omxLevel);
-                        return BAD_VALUE;
-                }
-                break;
-            case OMX_VIDEO_MPEG4ProfileSimpleScalable:
-                switch (omxLevel) {
-                    case OMX_VIDEO_MPEG4Level0b:
-                        profileLevel = SIMPLE_SCALABLE_PROFILE_LEVEL0;
-                        break;
-                    case OMX_VIDEO_MPEG4Level1:
-                        profileLevel = SIMPLE_SCALABLE_PROFILE_LEVEL1;
-                        break;
-                    case OMX_VIDEO_MPEG4Level2:
-                        profileLevel = SIMPLE_SCALABLE_PROFILE_LEVEL2;
-                        break;
-                    default:
-                        ALOGE("Unsupported level (%d) for MPEG4 simple "
-                             "scalable profile", omxLevel);
-                        return BAD_VALUE;
-                }
-                break;
-            case OMX_VIDEO_MPEG4ProfileCore:
-                switch (omxLevel) {
-                    case OMX_VIDEO_MPEG4Level1:
-                        profileLevel = CORE_PROFILE_LEVEL1;
-                        break;
-                    case OMX_VIDEO_MPEG4Level2:
-                        profileLevel = CORE_PROFILE_LEVEL2;
-                        break;
-                    default:
-                        ALOGE("Unsupported level (%d) for MPEG4 core "
-                             "profile", omxLevel);
-                        return BAD_VALUE;
-                }
-                break;
-            case OMX_VIDEO_MPEG4ProfileCoreScalable:
-                switch (omxLevel) {
-                    case OMX_VIDEO_MPEG4Level1:
-                        profileLevel = CORE_SCALABLE_PROFILE_LEVEL1;
-                        break;
-                    case OMX_VIDEO_MPEG4Level2:
-                        profileLevel = CORE_SCALABLE_PROFILE_LEVEL2;
-                        break;
-                    case OMX_VIDEO_MPEG4Level3:
-                        profileLevel = CORE_SCALABLE_PROFILE_LEVEL3;
-                        break;
-                    default:
-                        ALOGE("Unsupported level (%d) for MPEG4 core "
-                             "scalable profile", omxLevel);
-                        return BAD_VALUE;
-                }
-                break;
-            default:
-                ALOGE("Unsupported MPEG4 profile (%d)", omxProfile);
-                return BAD_VALUE;
-        }
-    }
-
-    *pvProfileLevel = profileLevel;
-    return OK;
-}
-
-inline static void ConvertYUV420SemiPlanarToYUV420Planar(
-        uint8_t *inyuv, uint8_t* outyuv,
-        int32_t width, int32_t height) {
-
-    int32_t outYsize = width * height;
-    uint32_t *outy = (uint32_t *)  outyuv;
-    uint16_t *outcb = (uint16_t *) (outyuv + outYsize);
-    uint16_t *outcr = (uint16_t *) (outyuv + outYsize + (outYsize >> 2));
-
-    /* Y copying */
-    memcpy(outy, inyuv, outYsize);
-
-    /* U & V copying */
-    uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
-    for (int32_t i = height >> 1; i > 0; --i) {
-        for (int32_t j = width >> 2; j > 0; --j) {
-            uint32_t temp = *inyuv_4++;
-            uint32_t tempU = temp & 0xFF;
-            tempU = tempU | ((temp >> 8) & 0xFF00);
-
-            uint32_t tempV = (temp >> 8) & 0xFF;
-            tempV = tempV | ((temp >> 16) & 0xFF00);
-
-            // Flip U and V
-            *outcb++ = tempV;
-            *outcr++ = tempU;
-        }
-    }
-}
-
-M4vH263Encoder::M4vH263Encoder(
-        const sp<MediaSource>& source,
-        const sp<MetaData>& meta)
-    : mSource(source),
-      mMeta(meta),
-      mNumInputFrames(-1),
-      mNextModTimeUs(0),
-      mPrevTimestampUs(-1),
-      mStarted(false),
-      mInputBuffer(NULL),
-      mInputFrameData(NULL),
-      mGroup(NULL) {
-
-    ALOGI("Construct software M4vH263Encoder");
-
-    mHandle = new tagvideoEncControls;
-    memset(mHandle, 0, sizeof(tagvideoEncControls));
-
-    mInitCheck = initCheck(meta);
-}
-
-M4vH263Encoder::~M4vH263Encoder() {
-    ALOGV("Destruct software M4vH263Encoder");
-    if (mStarted) {
-        stop();
-    }
-
-    delete mEncParams;
-    delete mHandle;
-}
-
-status_t M4vH263Encoder::initCheck(const sp<MetaData>& meta) {
-    ALOGV("initCheck");
-    CHECK(meta->findInt32(kKeyWidth, &mVideoWidth));
-    CHECK(meta->findInt32(kKeyHeight, &mVideoHeight));
-    CHECK(meta->findInt32(kKeyFrameRate, &mVideoFrameRate));
-    CHECK(meta->findInt32(kKeyBitRate, &mVideoBitRate));
-
-    // XXX: Add more color format support
-    CHECK(meta->findInt32(kKeyColorFormat, &mVideoColorFormat));
-    if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
-        if (mVideoColorFormat != OMX_COLOR_FormatYUV420SemiPlanar) {
-            ALOGE("Color format %d is not supported", mVideoColorFormat);
-            return BAD_VALUE;
-        }
-        // Allocate spare buffer only when color conversion is needed.
-        // Assume the color format is OMX_COLOR_FormatYUV420SemiPlanar.
-        mInputFrameData =
-            (uint8_t *) malloc((mVideoWidth * mVideoHeight * 3 ) >> 1);
-        CHECK(mInputFrameData);
-    }
-
-    // XXX: Remove this restriction
-    if (mVideoWidth % 16 != 0 || mVideoHeight % 16 != 0) {
-        ALOGE("Video frame size %dx%d must be a multiple of 16",
-            mVideoWidth, mVideoHeight);
-        return BAD_VALUE;
-    }
-
-    mEncParams = new tagvideoEncOptions;
-    memset(mEncParams, 0, sizeof(tagvideoEncOptions));
-    if (!PVGetDefaultEncOption(mEncParams, 0)) {
-        ALOGE("Failed to get default encoding parameters");
-        return BAD_VALUE;
-    }
-
-    // Need to know which role the encoder is in.
-    // XXX: Set the mode proper for other types of applications
-    //      like streaming or video conference
-    const char *mime;
-    CHECK(meta->findCString(kKeyMIMEType, &mime));
-    CHECK(!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
-          !strcmp(mime, MEDIA_MIMETYPE_VIDEO_H263));
-    if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)) {
-        mEncParams->encMode = COMBINE_MODE_WITH_ERR_RES;
-    } else {
-        mEncParams->encMode = H263_MODE;
-    }
-    mEncParams->encWidth[0] = mVideoWidth;
-    mEncParams->encHeight[0] = mVideoHeight;
-    mEncParams->encFrameRate[0] = mVideoFrameRate;
-    mEncParams->rcType = VBR_1;
-    mEncParams->vbvDelay = (float)5.0;
-
-    // Set profile and level
-    // If profile and level setting is not correct, failure
-    // is reported when the encoder is initialized.
-    mEncParams->profile_level = CORE_PROFILE_LEVEL2;
-    int32_t profile, level;
-    if (meta->findInt32(kKeyVideoProfile, &profile) &&
-        meta->findInt32(kKeyVideoLevel, &level)) {
-        if (OK != ConvertOmxProfileLevel(
-                        mEncParams->encMode, profile, level,
-                        &mEncParams->profile_level)) {
-            return BAD_VALUE;
-        }
-    }
-
-    mEncParams->packetSize = 32;
-    mEncParams->rvlcEnable = PV_OFF;
-    mEncParams->numLayers = 1;
-    mEncParams->timeIncRes = 1000;
-    mEncParams->tickPerSrc = mEncParams->timeIncRes / mVideoFrameRate;
-
-    mEncParams->bitRate[0] = mVideoBitRate;
-    mEncParams->iQuant[0] = 15;
-    mEncParams->pQuant[0] = 12;
-    mEncParams->quantType[0] = 0;
-    mEncParams->noFrameSkipped = PV_OFF;
-
-    // Set IDR frame refresh interval
-    int32_t iFramesIntervalSec;
-    CHECK(meta->findInt32(kKeyIFramesInterval, &iFramesIntervalSec));
-    if (iFramesIntervalSec < 0) {
-        mEncParams->intraPeriod = -1;
-    } else if (iFramesIntervalSec == 0) {
-        mEncParams->intraPeriod = 1;  // All I frames
-    } else {
-        mEncParams->intraPeriod =
-            (iFramesIntervalSec * mVideoFrameRate);
-    }
-
-    mEncParams->numIntraMB = 0;
-    mEncParams->sceneDetect = PV_ON;
-    mEncParams->searchRange = 16;
-    mEncParams->mv8x8Enable = PV_OFF;
-    mEncParams->gobHeaderInterval = 0;
-    mEncParams->useACPred = PV_ON;
-    mEncParams->intraDCVlcTh = 0;
-
-    mFormat = new MetaData;
-    mFormat->setInt32(kKeyWidth, mVideoWidth);
-    mFormat->setInt32(kKeyHeight, mVideoHeight);
-    mFormat->setInt32(kKeyBitRate, mVideoBitRate);
-    mFormat->setInt32(kKeyFrameRate, mVideoFrameRate);
-    mFormat->setInt32(kKeyColorFormat, mVideoColorFormat);
-
-    mFormat->setCString(kKeyMIMEType, mime);
-    mFormat->setCString(kKeyDecoderComponent, "M4vH263Encoder");
-    return OK;
-}
-
-status_t M4vH263Encoder::start(MetaData *params) {
-    ALOGV("start");
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    if (mStarted) {
-        ALOGW("Call start() when encoder already started");
-        return OK;
-    }
-
-    if (!PVInitVideoEncoder(mHandle, mEncParams)) {
-        ALOGE("Failed to initialize the encoder");
-        return UNKNOWN_ERROR;
-    }
-
-    mGroup = new MediaBufferGroup();
-    int32_t maxSize;
-    if (!PVGetMaxVideoFrameSize(mHandle, &maxSize)) {
-        maxSize = 256 * 1024;  // Magic #
-    }
-    ALOGV("Max output buffer size: %d", maxSize);
-    mGroup->add_buffer(new MediaBuffer(maxSize));
-
-    mSource->start(params);
-    mNumInputFrames = -1;  // 1st frame contains codec specific data
-    mStarted = true;
-
-    return OK;
-}
-
-status_t M4vH263Encoder::stop() {
-    ALOGV("stop");
-    if (!mStarted) {
-        ALOGW("Call stop() when encoder has not started");
-        return OK;
-    }
-
-    if (mInputBuffer) {
-        mInputBuffer->release();
-        mInputBuffer = NULL;
-    }
-
-    if (mGroup) {
-        delete mGroup;
-        mGroup = NULL;
-    }
-
-    if (mInputFrameData) {
-        delete mInputFrameData;
-        mInputFrameData = NULL;
-    }
-
-    CHECK(PVCleanUpVideoEncoder(mHandle));
-
-    mSource->stop();
-    mStarted = false;
-
-    return OK;
-}
-
-sp<MetaData> M4vH263Encoder::getFormat() {
-    ALOGV("getFormat");
-    return mFormat;
-}
-
-status_t M4vH263Encoder::read(
-        MediaBuffer **out, const ReadOptions *options) {
-
-    *out = NULL;
-
-    MediaBuffer *outputBuffer;
-    CHECK_EQ((status_t)OK, mGroup->acquire_buffer(&outputBuffer));
-    uint8_t *outPtr = (uint8_t *) outputBuffer->data();
-    int32_t dataLength = outputBuffer->size();
-
-    // Output codec specific data
-    if (mNumInputFrames < 0) {
-        if (!PVGetVolHeader(mHandle, outPtr, &dataLength, 0)) {
-            ALOGE("Failed to get VOL header");
-            return UNKNOWN_ERROR;
-        }
-        ALOGV("Output VOL header: %d bytes", dataLength);
-        outputBuffer->meta_data()->setInt32(kKeyIsCodecConfig, 1);
-        outputBuffer->set_range(0, dataLength);
-        *out = outputBuffer;
-        ++mNumInputFrames;
-        return OK;
-    }
-
-    // Ready for accepting an input video frame
-    status_t err = mSource->read(&mInputBuffer, options);
-    if (OK != err) {
-        if (err != ERROR_END_OF_STREAM) {
-            ALOGE("Failed to read from data source");
-        }
-        outputBuffer->release();
-        return err;
-    }
-
-    if (mInputBuffer->size() - ((mVideoWidth * mVideoHeight * 3) >> 1) != 0) {
-        outputBuffer->release();
-        mInputBuffer->release();
-        mInputBuffer = NULL;
-        return UNKNOWN_ERROR;
-    }
-
-    int64_t timeUs;
-    CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
-
-    // When the timestamp of the current sample is the same as that
-    // of the previous sample, encoding of the current sample is
-    // bypassed, and the output length of the sample is set to 0
-    if (mNumInputFrames >= 1 &&
-        (mNextModTimeUs > timeUs || mPrevTimestampUs == timeUs)) {
-        // Frame arrives too late
-        outputBuffer->set_range(0, 0);
-        *out = outputBuffer;
-        mInputBuffer->release();
-        mInputBuffer = NULL;
-        return OK;
-    }
-
-    // Don't accept out-of-order samples
-    CHECK(mPrevTimestampUs < timeUs);
-    mPrevTimestampUs = timeUs;
-
-    // Color convert to OMX_COLOR_FormatYUV420Planar if necessary
-    outputBuffer->meta_data()->setInt64(kKeyTime, timeUs);
-    uint8_t *inPtr = (uint8_t *) mInputBuffer->data();
-    if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
-        CHECK(mInputFrameData);
-        CHECK(mVideoColorFormat == OMX_COLOR_FormatYUV420SemiPlanar);
-        ConvertYUV420SemiPlanarToYUV420Planar(
-            inPtr, mInputFrameData, mVideoWidth, mVideoHeight);
-        inPtr = mInputFrameData;
-    }
-    CHECK(inPtr != NULL);
-
-    // Ready for encoding a video frame
-    VideoEncFrameIO vin, vout;
-    vin.height = ((mVideoHeight + 15) >> 4) << 4;
-    vin.pitch  = ((mVideoWidth  + 15) >> 4) << 4;
-    vin.timestamp = (timeUs + 500) / 1000; // in ms
-    vin.yChan = inPtr;
-    vin.uChan = vin.yChan + vin.height * vin.pitch;
-    vin.vChan = vin.uChan + ((vin.height * vin.pitch) >> 2);
-    unsigned long modTimeMs = 0;
-    int32_t nLayer = 0;
-    MP4HintTrack hintTrack;
-    if (!PVEncodeVideoFrame(mHandle, &vin, &vout,
-            &modTimeMs, outPtr, &dataLength, &nLayer) ||
-        !PVGetHintTrack(mHandle, &hintTrack)) {
-        ALOGE("Failed to encode frame or get hink track at frame %lld",
-            mNumInputFrames);
-        outputBuffer->release();
-        mInputBuffer->release();
-        mInputBuffer = NULL;
-        return UNKNOWN_ERROR;
-    }
-    CHECK(NULL == PVGetOverrunBuffer(mHandle));
-    if (hintTrack.CodeType == 0) {  // I-frame serves as sync frame
-        outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
-    }
-
-    ++mNumInputFrames;
-    mNextModTimeUs = modTimeMs * 1000LL;
-    outputBuffer->set_range(0, dataLength);
-    *out = outputBuffer;
-    mInputBuffer->release();
-    mInputBuffer = NULL;
-    return OK;
-}
-
-void M4vH263Encoder::signalBufferReturned(MediaBuffer *buffer) {
-}
-
-}  // namespace android
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
new file mode 100644
index 0000000..a5a2332
--- /dev/null
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -0,0 +1,706 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftMPEG4Encoder"
+#include <utils/Log.h>
+
+#include "mp4enc_api.h"
+#include "OMX_Video.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+
+#include "SoftMPEG4Encoder.h"
+
+namespace android {
+
+template<class T>
+static void InitOMXParams(T *params) {
+    params->nSize = sizeof(T);
+    params->nVersion.s.nVersionMajor = 1;
+    params->nVersion.s.nVersionMinor = 0;
+    params->nVersion.s.nRevision = 0;
+    params->nVersion.s.nStep = 0;
+}
+
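+// Converts one semi-planar (interleaved Cb/Cr) 4:2:0 frame into the planar
+// layout the encoder consumes: a width*height luma plane followed by two
+// quarter-size chroma planes. On a little-endian target the even byte of each
+// interleaved chroma pair ends up in the Cr plane and the odd byte in the Cb
+// plane (the "Flip U and V" step below); input and output must not overlap.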
+inline static void ConvertYUV420SemiPlanarToYUV420Planar(
+        uint8_t *inyuv, uint8_t* outyuv,
+        int32_t width, int32_t height) {
+
+    int32_t outYsize = width * height;
+    uint32_t *outy =  (uint32_t *) outyuv;
+    uint16_t *outcb = (uint16_t *) (outyuv + outYsize);
+    uint16_t *outcr = (uint16_t *) (outyuv + outYsize + (outYsize >> 2));
+
+    /* Y copying */
+    memcpy(outy, inyuv, outYsize);
+
+    /* U & V copying */
+    uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
+    for (int32_t i = height >> 1; i > 0; --i) {
+        for (int32_t j = width >> 2; j > 0; --j) {
+            uint32_t temp = *inyuv_4++;
+            uint32_t tempU = temp & 0xFF;
+            tempU = tempU | ((temp >> 8) & 0xFF00);
+
+            uint32_t tempV = (temp >> 8) & 0xFF;
+            tempV = tempV | ((temp >> 16) & 0xFF00);
+
+            // Flip U and V
+            *outcb++ = tempV;
+            *outcr++ = tempU;
+        }
+    }
+}
+
+SoftMPEG4Encoder::SoftMPEG4Encoder(
+            const char *name,
+            const OMX_CALLBACKTYPE *callbacks,
+            OMX_PTR appData,
+            OMX_COMPONENTTYPE **component)
+    : SimpleSoftOMXComponent(name, callbacks, appData, component),
+      mEncodeMode(COMBINE_MODE_WITH_ERR_RES),
+      mVideoWidth(176),
+      mVideoHeight(144),
+      mVideoFrameRate(30),
+      mVideoBitRate(192000),
+      mVideoColorFormat(OMX_COLOR_FormatYUV420Planar),
+      mIDRFrameRefreshIntervalInSec(1),
+      mNumInputFrames(-1),
+      mStarted(false),
+      mSawInputEOS(false),
+      mSignalledError(false),
+      mHandle(new tagvideoEncControls),
+      mEncParams(new tagvideoEncOptions),
+      mInputFrameData(NULL) {
+
+    if (!strcmp(name, "OMX.google.h263.encoder")) {
+        mEncodeMode = H263_MODE;
+    } else {
+        CHECK(!strcmp(name, "OMX.google.mpeg4.encoder"));
+    }
+
+    initPorts();
+    ALOGI("Construct SoftMPEG4Encoder");
+}
+
+SoftMPEG4Encoder::~SoftMPEG4Encoder() {
+    ALOGV("Destruct SoftMPEG4Encoder");
+    releaseEncoder();
+    List<BufferInfo *> &outQueue = getPortQueue(1);
+    List<BufferInfo *> &inQueue = getPortQueue(0);
+    CHECK(outQueue.empty());
+    CHECK(inQueue.empty());
+}
+
+OMX_ERRORTYPE SoftMPEG4Encoder::initEncParams() {
+    CHECK(mHandle != NULL);
+    memset(mHandle, 0, sizeof(tagvideoEncControls));
+
+    CHECK(mEncParams != NULL);
+    memset(mEncParams, 0, sizeof(tagvideoEncOptions));
+    if (!PVGetDefaultEncOption(mEncParams, 0)) {
+        ALOGE("Failed to get default encoding parameters");
+        return OMX_ErrorUndefined;
+    }
+    mEncParams->encMode = mEncodeMode;
+    mEncParams->encWidth[0] = mVideoWidth;
+    mEncParams->encHeight[0] = mVideoHeight;
+    mEncParams->encFrameRate[0] = mVideoFrameRate;
+    mEncParams->rcType = VBR_1;
+    mEncParams->vbvDelay = 5.0f;
+
+    // FIXME:
+    // Add more profile and level support for MPEG4 encoder
+    mEncParams->profile_level = CORE_PROFILE_LEVEL2;
+    mEncParams->packetSize = 32;
+    mEncParams->rvlcEnable = PV_OFF;
+    mEncParams->numLayers = 1;
+    mEncParams->timeIncRes = 1000;
+    mEncParams->tickPerSrc = mEncParams->timeIncRes / mVideoFrameRate;
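+    // With the defaults above (timeIncRes = 1000, 30 fps) integer division
+    // yields tickPerSrc = 33 ticks per source frame.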
+
+    mEncParams->bitRate[0] = mVideoBitRate;
+    mEncParams->iQuant[0] = 15;
+    mEncParams->pQuant[0] = 12;
+    mEncParams->quantType[0] = 0;
+    mEncParams->noFrameSkipped = PV_OFF;
+
+    if (mVideoColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+        // Color conversion is needed.
+        CHECK(mInputFrameData == NULL);
+        mInputFrameData =
+            (uint8_t *) malloc((mVideoWidth * mVideoHeight * 3 ) >> 1);
+        CHECK(mInputFrameData != NULL);
+    }
+
+    // PV's MPEG4 encoder requires the video dimensions to be a multiple of 16.
+    if (mVideoWidth % 16 != 0 || mVideoHeight % 16 != 0) {
+        ALOGE("Video frame size %dx%d must be a multiple of 16",
+            mVideoWidth, mVideoHeight);
+        return OMX_ErrorBadParameter;
+    }
+
+    // Set IDR frame refresh interval
+    if (mIDRFrameRefreshIntervalInSec < 0) {
+        mEncParams->intraPeriod = -1;
+    } else if (mIDRFrameRefreshIntervalInSec == 0) {
+        mEncParams->intraPeriod = 1;  // All I frames
+    } else {
+        mEncParams->intraPeriod =
+            (mIDRFrameRefreshIntervalInSec * mVideoFrameRate);
+    }
+
+    mEncParams->numIntraMB = 0;
+    mEncParams->sceneDetect = PV_ON;
+    mEncParams->searchRange = 16;
+    mEncParams->mv8x8Enable = PV_OFF;
+    mEncParams->gobHeaderInterval = 0;
+    mEncParams->useACPred = PV_ON;
+    mEncParams->intraDCVlcTh = 0;
+
+    return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftMPEG4Encoder::initEncoder() {
+    CHECK(!mStarted);
+
+    OMX_ERRORTYPE errType = OMX_ErrorNone;
+    if (OMX_ErrorNone != (errType = initEncParams())) {
+        ALOGE("Failed to initialized encoder params");
+        mSignalledError = true;
+        notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
+        return errType;
+    }
+
+    if (!PVInitVideoEncoder(mHandle, mEncParams)) {
+        ALOGE("Failed to initialize the encoder");
+        mSignalledError = true;
+        notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
+        return OMX_ErrorUndefined;
+    }
+
+    mNumInputFrames = -1;  // 1st buffer for codec specific data
+    mStarted = true;
+
+    return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftMPEG4Encoder::releaseEncoder() {
+    if (!mStarted) {
+        return OMX_ErrorNone;
+    }
+
+    PVCleanUpVideoEncoder(mHandle);
+
+    free(mInputFrameData);  // allocated with malloc() in initEncParams()
+    mInputFrameData = NULL;
+
+    delete mEncParams;
+    mEncParams = NULL;
+
+    delete mHandle;
+    mHandle = NULL;
+
+    mStarted = false;
+
+    return OMX_ErrorNone;
+}
+
+void SoftMPEG4Encoder::initPorts() {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+
+    const size_t kInputBufferSize = (mVideoWidth * mVideoHeight * 3) >> 1;
+
+    // 256 * 1024 is a magic number for PV's encoder, not sure why
+    const size_t kOutputBufferSize =
+        (kInputBufferSize > 256 * 1024)
+            ? kInputBufferSize: 256 * 1024;
+
+    def.nPortIndex = 0;
+    def.eDir = OMX_DirInput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = kInputBufferSize;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainVideo;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 1;
+
+    def.format.video.cMIMEType = const_cast<char *>("video/raw");
+
+    def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
+    def.format.video.eColorFormat = OMX_COLOR_FormatYUV420Planar;
+    def.format.video.xFramerate = (mVideoFrameRate << 16);  // Q16 format
+    def.format.video.nBitrate = mVideoBitRate;
+    def.format.video.nFrameWidth = mVideoWidth;
+    def.format.video.nFrameHeight = mVideoHeight;
+    def.format.video.nStride = mVideoWidth;
+    def.format.video.nSliceHeight = mVideoHeight;
+
+    addPort(def);
+
+    def.nPortIndex = 1;
+    def.eDir = OMX_DirOutput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = kOutputBufferSize;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainVideo;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 2;
+
+    def.format.video.cMIMEType =
+        (mEncodeMode == COMBINE_MODE_WITH_ERR_RES)
+            ? const_cast<char *>(MEDIA_MIMETYPE_VIDEO_MPEG4)
+            : const_cast<char *>(MEDIA_MIMETYPE_VIDEO_H263);
+
+    def.format.video.eCompressionFormat =
+        (mEncodeMode == COMBINE_MODE_WITH_ERR_RES)
+            ? OMX_VIDEO_CodingMPEG4
+            : OMX_VIDEO_CodingH263;
+
+    def.format.video.eColorFormat = OMX_COLOR_FormatUnused;
+    def.format.video.xFramerate = (0 << 16);  // Q16 format
+    def.format.video.nBitrate = mVideoBitRate;
+    def.format.video.nFrameWidth = mVideoWidth;
+    def.format.video.nFrameHeight = mVideoHeight;
+    def.format.video.nStride = mVideoWidth;
+    def.format.video.nSliceHeight = mVideoHeight;
+
+    addPort(def);
+}
+
+OMX_ERRORTYPE SoftMPEG4Encoder::internalGetParameter(
+        OMX_INDEXTYPE index, OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamVideoErrorCorrection:
+        {
+            return OMX_ErrorNotImplemented;
+        }
+
+        case OMX_IndexParamVideoBitrate:
+        {
+            OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
+                (OMX_VIDEO_PARAM_BITRATETYPE *) params;
+
+            if (bitRate->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            bitRate->eControlRate = OMX_Video_ControlRateVariable;
+            bitRate->nTargetBitrate = mVideoBitRate;
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamVideoPortFormat:
+        {
+            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
+                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex > 1) {
+                return OMX_ErrorNoMore;
+            }
+
+            if (formatParams->nPortIndex == 0) {
+                formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
+                if (formatParams->nIndex == 0) {
+                    formatParams->eColorFormat = OMX_COLOR_FormatYUV420Planar;
+                } else {
+                    formatParams->eColorFormat = OMX_COLOR_FormatYUV420SemiPlanar;
+                }
+            } else {
+                formatParams->eCompressionFormat =
+                    (mEncodeMode == COMBINE_MODE_WITH_ERR_RES)
+                        ? OMX_VIDEO_CodingMPEG4
+                        : OMX_VIDEO_CodingH263;
+
+                formatParams->eColorFormat = OMX_COLOR_FormatUnused;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamVideoH263:
+        {
+            OMX_VIDEO_PARAM_H263TYPE *h263type =
+                (OMX_VIDEO_PARAM_H263TYPE *)params;
+
+            if (h263type->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            h263type->nAllowedPictureTypes =
+                (OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP);
+            h263type->eProfile = OMX_VIDEO_H263ProfileBaseline;
+            h263type->eLevel = OMX_VIDEO_H263Level45;
+            h263type->bPLUSPTYPEAllowed = OMX_FALSE;
+            h263type->bForceRoundingTypeToZero = OMX_FALSE;
+            h263type->nPictureHeaderRepetition = 0;
+            h263type->nGOBHeaderInterval = 0;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamVideoMpeg4:
+        {
+            OMX_VIDEO_PARAM_MPEG4TYPE *mpeg4type =
+                (OMX_VIDEO_PARAM_MPEG4TYPE *)params;
+
+            if (mpeg4type->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            mpeg4type->eProfile = OMX_VIDEO_MPEG4ProfileCore;
+            mpeg4type->eLevel = OMX_VIDEO_MPEG4Level2;
+            mpeg4type->nAllowedPictureTypes =
+                (OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP);
+            mpeg4type->nBFrames = 0;
+            mpeg4type->nIDCVLCThreshold = 0;
+            mpeg4type->bACPred = OMX_TRUE;
+            mpeg4type->nMaxPacketSize = 256;
+            mpeg4type->nTimeIncRes = 1000;
+            mpeg4type->nHeaderExtension = 0;
+            mpeg4type->bReversibleVLC = OMX_FALSE;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamVideoProfileLevelQuerySupported:
+        {
+            OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileLevel =
+                (OMX_VIDEO_PARAM_PROFILELEVELTYPE *)params;
+
+            if (profileLevel->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (profileLevel->nProfileIndex > 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            if (mEncodeMode == H263_MODE) {
+                profileLevel->eProfile = OMX_VIDEO_H263ProfileBaseline;
+                profileLevel->eLevel = OMX_VIDEO_H263Level45;
+            } else {
+                profileLevel->eProfile = OMX_VIDEO_MPEG4ProfileCore;
+                profileLevel->eLevel = OMX_VIDEO_MPEG4Level2;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalGetParameter(index, params);
+    }
+}
+
+OMX_ERRORTYPE SoftMPEG4Encoder::internalSetParameter(
+        OMX_INDEXTYPE index, const OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamVideoErrorCorrection:
+        {
+            return OMX_ErrorNotImplemented;
+        }
+
+        case OMX_IndexParamVideoBitrate:
+        {
+            OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
+                (OMX_VIDEO_PARAM_BITRATETYPE *) params;
+
+            if (bitRate->nPortIndex != 1 ||
+                bitRate->eControlRate != OMX_Video_ControlRateVariable) {
+                return OMX_ErrorUndefined;
+            }
+
+            mVideoBitRate = bitRate->nTargetBitrate;
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamPortDefinition:
+        {
+            OMX_PARAM_PORTDEFINITIONTYPE *def =
+                (OMX_PARAM_PORTDEFINITIONTYPE *)params;
+            if (def->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (def->nPortIndex == 0) {
+                if (def->format.video.eCompressionFormat != OMX_VIDEO_CodingUnused ||
+                    (def->format.video.eColorFormat != OMX_COLOR_FormatYUV420Planar &&
+                     def->format.video.eColorFormat != OMX_COLOR_FormatYUV420SemiPlanar)) {
+                    return OMX_ErrorUndefined;
+                }
+            } else {
+                if ((mEncodeMode == COMBINE_MODE_WITH_ERR_RES &&
+                        def->format.video.eCompressionFormat != OMX_VIDEO_CodingMPEG4) ||
+                    (mEncodeMode == H263_MODE &&
+                        def->format.video.eCompressionFormat != OMX_VIDEO_CodingH263) ||
+                    (def->format.video.eColorFormat != OMX_COLOR_FormatUnused)) {
+                    return OMX_ErrorUndefined;
+                }
+            }
+
+            OMX_ERRORTYPE err = SimpleSoftOMXComponent::internalSetParameter(index, params);
+            if (OMX_ErrorNone != err) {
+                return err;
+            }
+
+            if (def->nPortIndex == 0) {
+                mVideoWidth = def->format.video.nFrameWidth;
+                mVideoHeight = def->format.video.nFrameHeight;
+                mVideoFrameRate = def->format.video.xFramerate >> 16;
+                mVideoColorFormat = def->format.video.eColorFormat;
+            } else {
+                mVideoBitRate = def->format.video.nBitrate;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamStandardComponentRole:
+        {
+            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+                (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+            if (strncmp((const char *)roleParams->cRole,
+                        (mEncodeMode == H263_MODE)
+                            ? "video_encoder.h263": "video_encoder.mpeg4",
+                        OMX_MAX_STRINGNAME_SIZE - 1)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamVideoPortFormat:
+        {
+            const OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
+                (const OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex > 1) {
+                return OMX_ErrorNoMore;
+            }
+
+            if (formatParams->nPortIndex == 0) {
+                if (formatParams->eCompressionFormat != OMX_VIDEO_CodingUnused ||
+                    ((formatParams->nIndex == 0 &&
+                      formatParams->eColorFormat != OMX_COLOR_FormatYUV420Planar) ||
+                    (formatParams->nIndex == 1 &&
+                     formatParams->eColorFormat != OMX_COLOR_FormatYUV420SemiPlanar))) {
+                    return OMX_ErrorUndefined;
+                }
+                mVideoColorFormat = formatParams->eColorFormat;
+            } else {
+                if ((mEncodeMode == H263_MODE &&
+                        formatParams->eCompressionFormat != OMX_VIDEO_CodingH263) ||
+                    (mEncodeMode == COMBINE_MODE_WITH_ERR_RES &&
+                        formatParams->eCompressionFormat != OMX_VIDEO_CodingMPEG4) ||
+                    formatParams->eColorFormat != OMX_COLOR_FormatUnused) {
+                    return OMX_ErrorUndefined;
+                }
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamVideoH263:
+        {
+            OMX_VIDEO_PARAM_H263TYPE *h263type =
+                (OMX_VIDEO_PARAM_H263TYPE *)params;
+
+            if (h263type->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (h263type->eProfile != OMX_VIDEO_H263ProfileBaseline ||
+                h263type->eLevel != OMX_VIDEO_H263Level45 ||
+                (h263type->nAllowedPictureTypes & OMX_VIDEO_PictureTypeB) ||
+                h263type->bPLUSPTYPEAllowed != OMX_FALSE ||
+                h263type->bForceRoundingTypeToZero != OMX_FALSE ||
+                h263type->nPictureHeaderRepetition != 0 ||
+                h263type->nGOBHeaderInterval != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamVideoMpeg4:
+        {
+            OMX_VIDEO_PARAM_MPEG4TYPE *mpeg4type =
+                (OMX_VIDEO_PARAM_MPEG4TYPE *)params;
+
+            if (mpeg4type->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (mpeg4type->eProfile != OMX_VIDEO_MPEG4ProfileCore ||
+                mpeg4type->eLevel != OMX_VIDEO_MPEG4Level2 ||
+                (mpeg4type->nAllowedPictureTypes & OMX_VIDEO_PictureTypeB) ||
+                mpeg4type->nBFrames != 0 ||
+                mpeg4type->nIDCVLCThreshold != 0 ||
+                mpeg4type->bACPred != OMX_TRUE ||
+                mpeg4type->nMaxPacketSize != 256 ||
+                mpeg4type->nTimeIncRes != 1000 ||
+                mpeg4type->nHeaderExtension != 0 ||
+                mpeg4type->bReversibleVLC != OMX_FALSE) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalSetParameter(index, params);
+    }
+}
+
+void SoftMPEG4Encoder::onQueueFilled(OMX_U32 portIndex) {
+    if (mSignalledError || mSawInputEOS) {
+        return;
+    }
+
+    if (!mStarted) {
+        if (OMX_ErrorNone != initEncoder()) {
+            return;
+        }
+    }
+
+    List<BufferInfo *> &inQueue = getPortQueue(0);
+    List<BufferInfo *> &outQueue = getPortQueue(1);
+
+    while (!mSawInputEOS && !inQueue.empty() && !outQueue.empty()) {
+        BufferInfo *inInfo = *inQueue.begin();
+        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+        BufferInfo *outInfo = *outQueue.begin();
+        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+        outHeader->nTimeStamp = 0;
+        outHeader->nFlags = 0;
+        outHeader->nOffset = 0;
+        outHeader->nFilledLen = 0;
+
+        uint8_t *outPtr = (uint8_t *) outHeader->pBuffer;
+        int32_t dataLength = outHeader->nAllocLen;
+
+        if (mNumInputFrames < 0) {
+            if (!PVGetVolHeader(mHandle, outPtr, &dataLength, 0)) {
+                ALOGE("Failed to get VOL header");
+                mSignalledError = true;
+                notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
+                return;
+            }
+            ALOGV("Output VOL header: %d bytes", dataLength);
+            ++mNumInputFrames;
+            outHeader->nFlags |= OMX_BUFFERFLAG_CODECCONFIG;
+            outHeader->nFilledLen = dataLength;
+            outQueue.erase(outQueue.begin());
+            outInfo->mOwnedByUs = false;
+            notifyFillBufferDone(outHeader);
+            return;
+        }
+
+        // Save the input buffer info so that it can be
+        // passed to an output buffer
+        InputBufferInfo info;
+        info.mTimeUs = inHeader->nTimeStamp;
+        info.mFlags = inHeader->nFlags;
+        mInputBufferInfoVec.push(info);
+
+        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+            mSawInputEOS = true;
+        }
+
+        if (inHeader->nFilledLen > 0) {
+            const void *inData = inHeader->pBuffer + inHeader->nOffset;
+            uint8_t *inputData = (uint8_t *) inData;
+            if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
+                ConvertYUV420SemiPlanarToYUV420Planar(
+                    inputData, mInputFrameData, mVideoWidth, mVideoHeight);
+                inputData = mInputFrameData;
+            }
+            CHECK(inputData != NULL);
+
+            VideoEncFrameIO vin, vout;
+            memset(&vin, 0, sizeof(vin));
+            memset(&vout, 0, sizeof(vout));
+            vin.height = ((mVideoHeight + 15) >> 4) << 4;
+            vin.pitch = ((mVideoWidth + 15) >> 4) << 4;
+            vin.timestamp = (inHeader->nTimeStamp + 500) / 1000;  // in ms
+            vin.yChan = inputData;
+            vin.uChan = vin.yChan + vin.height * vin.pitch;
+            vin.vChan = vin.uChan + ((vin.height * vin.pitch) >> 2);
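+            // vin points at one planar frame: a pitch*height luma plane
+            // followed by two quarter-size chroma planes.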
+
+            unsigned long modTimeMs = 0;
+            int32_t nLayer = 0;
+            MP4HintTrack hintTrack;
+            if (!PVEncodeVideoFrame(mHandle, &vin, &vout,
+                    &modTimeMs, outPtr, &dataLength, &nLayer) ||
+                !PVGetHintTrack(mHandle, &hintTrack)) {
+                ALOGE("Failed to encode frame or get hink track at frame %lld",
+                    mNumInputFrames);
+                mSignalledError = true;
+                notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
+            }
+            CHECK(NULL == PVGetOverrunBuffer(mHandle));
+            if (hintTrack.CodeType == 0) {  // I-frame serves as sync frame
+                outHeader->nFlags |= OMX_BUFFERFLAG_SYNCFRAME;
+            }
+
+            ++mNumInputFrames;
+        } else {
+            dataLength = 0;
+        }
+
+        inQueue.erase(inQueue.begin());
+        inInfo->mOwnedByUs = false;
+        notifyEmptyBufferDone(inHeader);
+
+        outQueue.erase(outQueue.begin());
+        CHECK(!mInputBufferInfoVec.empty());
+        InputBufferInfo *inputBufInfo = mInputBufferInfoVec.begin();
+        outHeader->nTimeStamp = inputBufInfo->mTimeUs;
+        outHeader->nFlags |= (inputBufInfo->mFlags | OMX_BUFFERFLAG_ENDOFFRAME);
+        outHeader->nFilledLen = dataLength;
+        // Read the saved info before erasing it; erase() invalidates the pointer.
+        mInputBufferInfoVec.erase(mInputBufferInfoVec.begin());
+        outInfo->mOwnedByUs = false;
+        notifyFillBufferDone(outHeader);
+    }
+}
+
+}  // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+        const char *name, const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+    return new android::SoftMPEG4Encoder(name, callbacks, appData, component);
+}
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
new file mode 100644
index 0000000..3e90d54
--- /dev/null
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_MPEG4_ENCODER_H_
+#define SOFT_MPEG4_ENCODER_H_
+
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/foundation/ABase.h>
+#include "SimpleSoftOMXComponent.h"
+#include "mp4enc_api.h"
+
+
+namespace android {
+
+struct MediaBuffer;
+
+struct SoftMPEG4Encoder : public SimpleSoftOMXComponent {
+    SoftMPEG4Encoder(
+            const char *name,
+            const OMX_CALLBACKTYPE *callbacks,
+            OMX_PTR appData,
+            OMX_COMPONENTTYPE **component);
+
+    // Override SimpleSoftOMXComponent methods
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR params);
+
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
+    virtual void onQueueFilled(OMX_U32 portIndex);
+
+protected:
+    virtual ~SoftMPEG4Encoder();
+
+private:
+    enum {
+        kNumBuffers = 2,
+    };
+
+    // OMX input buffer's timestamp and flags
+    typedef struct {
+        int64_t mTimeUs;
+        int32_t mFlags;
+    } InputBufferInfo;
+
+    MP4EncodingMode mEncodeMode;
+    int32_t  mVideoWidth;
+    int32_t  mVideoHeight;
+    int32_t  mVideoFrameRate;
+    int32_t  mVideoBitRate;
+    int32_t  mVideoColorFormat;
+    int32_t  mIDRFrameRefreshIntervalInSec;
+
+    int64_t  mNumInputFrames;
+    bool     mStarted;
+    bool     mSawInputEOS;
+    bool     mSignalledError;
+
+    tagvideoEncControls   *mHandle;
+    tagvideoEncOptions    *mEncParams;
+    uint8_t               *mInputFrameData;
+    Vector<InputBufferInfo> mInputBufferInfoVec;
+
+    void initPorts();
+    OMX_ERRORTYPE initEncParams();
+    OMX_ERRORTYPE initEncoder();
+    OMX_ERRORTYPE releaseEncoder();
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftMPEG4Encoder);
+};
+
+}  // namespace android
+
+#endif  // SOFT_MPEG4_ENCODER_H_
diff --git a/media/libstagefright/include/AVCEncoder.h b/media/libstagefright/include/AVCEncoder.h
deleted file mode 100644
index 83e1f97..0000000
--- a/media/libstagefright/include/AVCEncoder.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AVC_ENCODER_H_
-
-#define AVC_ENCODER_H_
-
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaSource.h>
-#include <utils/Vector.h>
-
-struct tagAVCHandle;
-struct tagAVCEncParam;
-
-namespace android {
-
-struct MediaBuffer;
-struct MediaBufferGroup;
-
-struct AVCEncoder : public MediaSource,
-                    public MediaBufferObserver {
-    AVCEncoder(const sp<MediaSource> &source,
-            const sp<MetaData>& meta);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-    virtual void signalBufferReturned(MediaBuffer *buffer);
-
-    // Callbacks required by the encoder
-    int32_t allocOutputBuffers(unsigned int sizeInMbs, unsigned int numBuffers);
-    void    unbindOutputBuffer(int32_t index);
-    int32_t bindOutputBuffer(int32_t index, uint8_t **yuv);
-
-protected:
-    virtual ~AVCEncoder();
-
-private:
-    sp<MediaSource> mSource;
-    sp<MetaData>    mFormat;
-    sp<MetaData>    mMeta;
-
-    int32_t  mVideoWidth;
-    int32_t  mVideoHeight;
-    int32_t  mVideoFrameRate;
-    int32_t  mVideoBitRate;
-    int32_t  mVideoColorFormat;
-    int64_t  mNumInputFrames;
-    int64_t  mPrevTimestampUs;
-    status_t mInitCheck;
-    bool     mStarted;
-    bool     mSpsPpsHeaderReceived;
-    bool     mReadyForNextFrame;
-    int32_t  mIsIDRFrame;  // for set kKeyIsSyncFrame
-
-    tagAVCHandle          *mHandle;
-    tagAVCEncParam        *mEncParams;
-    MediaBuffer           *mInputBuffer;
-    uint8_t               *mInputFrameData;
-    MediaBufferGroup      *mGroup;
-    Vector<MediaBuffer *> mOutputBuffers;
-
-
-    status_t initCheck(const sp<MetaData>& meta);
-    void releaseOutputBuffers();
-
-    AVCEncoder(const AVCEncoder &);
-    AVCEncoder &operator=(const AVCEncoder &);
-};
-
-}  // namespace android
-
-#endif  // AVC_ENCODER_H_
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 9115f91..a2e2e85 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -232,7 +232,6 @@
 
     int64_t mLastVideoTimeUs;
     TimedTextDriver *mTextDriver;
-    mutable Mutex mTimedTextLock;
 
     sp<WVMExtractor> mWVMExtractor;
     sp<MediaExtractor> mExtractor;
diff --git a/media/libstagefright/include/M4vH263Encoder.h b/media/libstagefright/include/M4vH263Encoder.h
deleted file mode 100644
index dbe9fd0..0000000
--- a/media/libstagefright/include/M4vH263Encoder.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef M4V_H263_ENCODER_H_
-
-#define M4V_H263_ENCODER_H_
-
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaSource.h>
-
-struct tagvideoEncControls;
-struct tagvideoEncOptions;
-
-namespace android {
-
-struct MediaBuffer;
-struct MediaBufferGroup;
-
-struct M4vH263Encoder : public MediaSource,
-                    public MediaBufferObserver {
-    M4vH263Encoder(const sp<MediaSource> &source,
-            const sp<MetaData>& meta);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-    virtual void signalBufferReturned(MediaBuffer *buffer);
-
-protected:
-    virtual ~M4vH263Encoder();
-
-private:
-    sp<MediaSource> mSource;
-    sp<MetaData>    mFormat;
-    sp<MetaData>    mMeta;
-
-    int32_t  mVideoWidth;
-    int32_t  mVideoHeight;
-    int32_t  mVideoFrameRate;
-    int32_t  mVideoBitRate;
-    int32_t  mVideoColorFormat;
-    int64_t  mNumInputFrames;
-    int64_t  mNextModTimeUs;
-    int64_t  mPrevTimestampUs;
-    status_t mInitCheck;
-    bool     mStarted;
-
-    tagvideoEncControls   *mHandle;
-    tagvideoEncOptions    *mEncParams;
-    MediaBuffer           *mInputBuffer;
-    uint8_t               *mInputFrameData;
-    MediaBufferGroup      *mGroup;
-
-    status_t initCheck(const sp<MetaData>& meta);
-    void releaseOutputBuffers();
-
-    M4vH263Encoder(const M4vH263Encoder &);
-    M4vH263Encoder &operator=(const M4vH263Encoder &);
-};
-
-}  // namespace android
-
-#endif  // M4V_H263_ENCODER_H_
diff --git a/media/libstagefright/include/WVMExtractor.h b/media/libstagefright/include/WVMExtractor.h
index 3c3ca89..2b952e2 100644
--- a/media/libstagefright/include/WVMExtractor.h
+++ b/media/libstagefright/include/WVMExtractor.h
@@ -34,6 +34,7 @@
 
     virtual int64_t getCachedDurationUs(status_t *finalStatus) = 0;
     virtual void setAdaptiveStreamingMode(bool adaptive) = 0;
+    virtual void setCryptoPluginMode(bool cryptoPluginMode) = 0;
     virtual void setUID(uid_t uid) = 0;
 };
 
@@ -61,6 +62,12 @@
     // is used.
     void setAdaptiveStreamingMode(bool adaptive);
 
+    // setCryptoPluginMode(true) to select crypto plugin mode.
+    // In this mode, the extractor returns encrypted data for use
+    // with the MediaCodec model, which handles the decryption in the
+    // codec.
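+    // For example, a hypothetical caller using the MediaCodec path would call
+    // setCryptoPluginMode(true) before reading, so that samples are returned
+    // still encrypted and decryption is left to the codec's crypto plugin.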
+    void setCryptoPluginMode(bool cryptoPluginMode);
+
     void setUID(uid_t uid);
 
     static bool getVendorLibHandle();
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 7fd99a8..1cab077 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -305,10 +305,7 @@
 }
 
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAAC() {
-    Vector<size_t> ranges;
-    Vector<size_t> frameOffsets;
-    Vector<size_t> frameSizes;
-    size_t auSize = 0;
+    int64_t timeUs;
 
     size_t offset = 0;
     while (offset + 7 <= mBuffer->size()) {
@@ -332,6 +329,8 @@
             mFormat = MakeAACCodecSpecificData(
                     profile, sampling_freq_index, channel_configuration);
 
+            mFormat->setInt32(kKeyIsADTS, true);
+
             int32_t sampleRate;
             int32_t numChannels;
             CHECK(mFormat->findInt32(kKeySampleRate, &sampleRate));
@@ -367,10 +366,12 @@
 
         size_t headerSize = protection_absent ? 7 : 9;
 
-        ranges.push(aac_frame_length);
-        frameOffsets.push(offset + headerSize);
-        frameSizes.push(aac_frame_length - headerSize);
-        auSize += aac_frame_length - headerSize;
+        int64_t tmpUs = fetchTimestamp(aac_frame_length);
+        CHECK_GE(tmpUs, 0ll);
+
+        if (offset == 0) {
+            timeUs = tmpUs;
+        }
 
         offset += aac_frame_length;
     }
@@ -379,37 +380,14 @@
         return NULL;
     }
 
-    int64_t timeUs = -1;
-
-    for (size_t i = 0; i < ranges.size(); ++i) {
-        int64_t tmpUs = fetchTimestamp(ranges.itemAt(i));
-
-        if (i == 0) {
-            timeUs = tmpUs;
-        }
-    }
-
-    sp<ABuffer> accessUnit = new ABuffer(auSize);
-    size_t dstOffset = 0;
-    for (size_t i = 0; i < frameOffsets.size(); ++i) {
-        size_t frameOffset = frameOffsets.itemAt(i);
-
-        memcpy(accessUnit->data() + dstOffset,
-               mBuffer->data() + frameOffset,
-               frameSizes.itemAt(i));
-
-        dstOffset += frameSizes.itemAt(i);
-    }
+    sp<ABuffer> accessUnit = new ABuffer(offset);
+    memcpy(accessUnit->data(), mBuffer->data(), offset);
 
     memmove(mBuffer->data(), mBuffer->data() + offset,
             mBuffer->size() - offset);
     mBuffer->setRange(0, mBuffer->size() - offset);
 
-    if (timeUs >= 0) {
-        accessUnit->meta()->setInt64("timeUs", timeUs);
-    } else {
-        ALOGW("no time for AAC access unit");
-    }
+    accessUnit->meta()->setInt64("timeUs", timeUs);
 
     return accessUnit;
 }
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 9b7bb5a..6e53095 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -45,7 +45,9 @@
     { "OMX.google.g711.alaw.decoder", "g711dec", "audio_decoder.g711alaw" },
     { "OMX.google.g711.mlaw.decoder", "g711dec", "audio_decoder.g711mlaw" },
     { "OMX.google.h263.decoder", "mpeg4dec", "video_decoder.h263" },
+    { "OMX.google.h263.encoder", "mpeg4enc", "video_encoder.h263" },
     { "OMX.google.mpeg4.decoder", "mpeg4dec", "video_decoder.mpeg4" },
+    { "OMX.google.mpeg4.encoder", "mpeg4enc", "video_encoder.mpeg4" },
     { "OMX.google.mp3.decoder", "mp3dec", "audio_decoder.mp3" },
     { "OMX.google.vorbis.decoder", "vorbisdec", "audio_decoder.vorbis" },
     { "OMX.google.vpx.decoder", "vpxdec", "video_decoder.vpx" },
diff --git a/media/libstagefright/timedtext/TimedTextPlayer.cpp b/media/libstagefright/timedtext/TimedTextPlayer.cpp
index dc5f6b8..f855d90 100644
--- a/media/libstagefright/timedtext/TimedTextPlayer.cpp
+++ b/media/libstagefright/timedtext/TimedTextPlayer.cpp
@@ -112,15 +112,14 @@
               break;
             }
             sp<RefBase> obj;
-            msg->findObject("subtitle", &obj);
-            if (obj != NULL) {
+            if (msg->findObject("subtitle", &obj)) {
                 sp<ParcelEvent> parcelEvent;
                 parcelEvent = static_cast<ParcelEvent*>(obj.get());
                 notifyListener(&(parcelEvent->parcel));
+                doRead();
             } else {
                 notifyListener();
             }
-            doRead();
             break;
         }
         case kWhatSetSource: {
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 6eeda9a..06b0062 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -142,6 +142,23 @@
 
 nsecs_t AudioFlinger::mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
 
+// Whether to use fast mixer
+static const enum {
+    FastMixer_Never,    // never initialize or use: for debugging only
+    FastMixer_Always,   // always initialize and use, even if not needed: for debugging only
+                        // normal mixer multiplier is 1
+    FastMixer_Static,   // initialize if needed, then use all the time if initialized,
+                        // multiplier is calculated based on minimum normal mixer buffer size
+    FastMixer_Dynamic,  // initialize if needed, then use dynamically depending on track load,
+                        // multiplier is calculated based on minimum normal mixer buffer size
+    // FIXME for FastMixer_Dynamic:
+    //  Supporting this option will require fixing HALs that can't handle large writes.
+    //  For example, one HAL implementation returns an error from a large write,
+    //  and another HAL implementation corrupts memory, possibly in the sample rate converter.
+    //  We could either fix the HAL implementations, or provide a wrapper that breaks
+    //  up large writes into smaller ones, and the wrapper would need to deal with the scheduler.
+} kUseFastMixer = FastMixer_Static;
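+// The mode chosen here is consulted in two places in this change: it decides
+// whether the fast mixer is initialized at all (initFastMixer below) and which
+// sink normal mixing writes to (mOutputSink directly, or mPipeSink feeding the
+// fast mixer's submix).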
+
 // ----------------------------------------------------------------------------
 
 #ifdef ADD_BATTERY_DATA
@@ -1475,14 +1492,11 @@
         mMasterVolume(audioFlinger->masterVolumeSW_l()),
         mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
         mMixerStatus(MIXER_IDLE),
-        mPrevMixerStatus(MIXER_IDLE),
+        mMixerStatusIgnoringFastTracks(MIXER_IDLE),
         standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
-        mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1),
-        mFastTrackNewMask(0)
+        // index 0 is reserved for normal mixer's submix
+        mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1)
 {
-#if !LOG_NDEBUG
-    memset(mFastTrackNewArray, 0, sizeof(mFastTrackNewArray));
-#endif
     snprintf(mName, kNameLength, "AudioOut_%X", id);
 
     readOutputParameters();
@@ -1534,8 +1548,7 @@
 
     snprintf(buffer, SIZE, "Output thread %p tracks\n", this);
     result.append(buffer);
-    result.append("   Name Client Type Fmt Chn mask   Session Frames S M F SRate  L dB  R dB  "
-                  "Server     User       Main buf   Aux Buf\n");
+    Track::appendDumpHeader(result);
     for (size_t i = 0; i < mTracks.size(); ++i) {
         sp<Track> track = mTracks[i];
         if (track != 0) {
@@ -1546,8 +1559,7 @@
 
     snprintf(buffer, SIZE, "Output thread %p active tracks\n", this);
     result.append(buffer);
-    result.append("   Name Client Type Fmt Chn mask   Session Frames S M F SRate  L dB  R dB  "
-                  "Server     User       Main buf   Aux Buf\n");
+    Track::appendDumpHeader(result);
     for (size_t i = 0; i < mActiveTracks.size(); ++i) {
         sp<Track> track = mActiveTracks[i].promote();
         if (track != 0) {
@@ -1623,8 +1635,8 @@
     bool isTimed = (flags & IAudioFlinger::TRACK_TIMED) != 0;
 
     // client expresses a preference for FAST, but we get the final say
-    if ((flags & IAudioFlinger::TRACK_FAST) &&
-          !(
+    if (flags & IAudioFlinger::TRACK_FAST) {
+      if (
             // not timed
             (!isTimed) &&
             // either of these use cases:
@@ -1633,11 +1645,11 @@
               (
                 (sharedBuffer != 0)
               ) ||
-              // use case 2: callback handler and frame count at least as large as HAL
+              // use case 2: callback handler and frame count is default or at least as large as HAL
               (
                 (tid != -1) &&
-                // FIXME supported frame counts should not be hard-coded
-                frameCount >= (int) mFrameCount // FIXME int cast is due to wrong parameter type
+                ((frameCount == 0) ||
+                (frameCount >= (int) (mFrameCount * 2))) // * 2 is due to SRC jitter, see below
               )
             ) &&
             // PCM data
@@ -1655,17 +1667,35 @@
             (mFastTrackAvailMask != 0)
             // FIXME test that MixerThread for this fast track has a capable output HAL
             // FIXME add a permission test also?
-          ) ) {
-        ALOGW("AUDIO_POLICY_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d "
+        ) {
+        // if frameCount not specified, then it defaults to fast mixer (HAL) frame count
+        if (frameCount == 0) {
+            frameCount = mFrameCount * 2;   // FIXME * 2 is due to SRC jitter, should be computed
+        }
+        ALOGI("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
+                frameCount, mFrameCount);
+      } else {
+        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d "
                 "mFrameCount=%d format=%d isLinear=%d channelMask=%d sampleRate=%d mSampleRate=%d "
                 "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
                 isTimed, sharedBuffer.get(), frameCount, mFrameCount, format,
                 audio_is_linear_pcm(format),
                 channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
         flags &= ~IAudioFlinger::TRACK_FAST;
-        if (0 < frameCount && frameCount < (int) mNormalFrameCount) {
-            frameCount = mNormalFrameCount;
+        // For compatibility with AudioTrack calculation, buffer depth is forced
+        // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
+        // This is probably too conservative, but legacy application code may depend on it.
+        // If you change this calculation, also review the start threshold which is related.
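+        // For example, with hypothetical values of 80 ms HAL latency, a 960
+        // frame normal mix buffer and a 48 kHz sample rate: each normal buffer
+        // covers 20 ms, so minBufCount = 80 / 20 = 4 and minFrameCount = 3840.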
+        uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
+        uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
+        if (minBufCount < 2) {
+            minBufCount = 2;
         }
+        int minFrameCount = mNormalFrameCount * minBufCount;
+        if (frameCount < minFrameCount) {
+            frameCount = minFrameCount;
+        }
+      }
     }
 
     if (mType == DIRECT) {
@@ -1832,6 +1862,7 @@
 void AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
 {
     track->mState = TrackBase::TERMINATED;
+    // active tracks are removed by threadLoop()
     if (mActiveTracks.indexOf(track) < 0) {
         removeTrack_l(track);
     }
@@ -1841,6 +1872,16 @@
 {
     mTracks.remove(track);
     deleteTrackName_l(track->name());
+    // redundant as track is about to be destroyed, for dumpsys only
+    track->mName = -1;
+    if (track->isFastTrack()) {
+        int index = track->mFastIndex;
+        ALOG_ASSERT(0 < index && index < FastMixerState::kMaxFastTracks);
+        ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index)));
+        mFastTrackAvailMask |= 1 << index;
+        // redundant as track is about to be destroyed, for dumpsys only
+        track->mFastIndex = -1;
+    }
     sp<EffectChain> chain = getEffectChain_l(track->sessionId());
     if (chain != 0) {
         chain->decTrackCnt();
@@ -1903,17 +1944,19 @@
                 mFrameCount);
     }
 
-    // Calculate size of normal mix buffer
-    if (mType == MIXER) {
+    // Calculate size of normal mix buffer relative to the HAL output buffer size
+    uint32_t multiple = 1;
+    if (mType == MIXER && (kUseFastMixer == FastMixer_Static || kUseFastMixer == FastMixer_Dynamic)) {
         size_t minNormalFrameCount = (kMinNormalMixBufferSizeMs * mSampleRate) / 1000;
-        mNormalFrameCount = ((minNormalFrameCount + mFrameCount - 1) / mFrameCount) * mFrameCount;
-        if (mNormalFrameCount & 15) {
-            ALOGW("Normal mix buffer size is %u frames but AudioMixer requires multiples of 16 "
-                  "frames", mNormalFrameCount);
+        multiple = (minNormalFrameCount + mFrameCount - 1) / mFrameCount;
+        // force multiple to be even, for compatibility with doubling of fast tracks due to HAL SRC
+        // (it would be unusual for the normal mix buffer size to not be a multiple of fast track)
+        // FIXME this rounding up should not be done if no HAL SRC
+        if ((multiple > 2) && (multiple & 1)) {
+            ++multiple;
         }
-    } else {
-        mNormalFrameCount = mFrameCount;
     }
+    mNormalFrameCount = multiple * mFrameCount;
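+    // For example, with a hypothetical 256-frame HAL buffer at 48 kHz and a
+    // 20 ms minimum normal mix buffer: minNormalFrameCount = 960, multiple
+    // rounds up to 4 (already even), and mNormalFrameCount becomes 1024 frames.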
     ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount, mNormalFrameCount);
 
     // FIXME - Current mixer implementation only supports stereo output: Always
@@ -2090,8 +2133,21 @@
     ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
     ALOG_ASSERT(index == 0);
 
-    // initialize fast mixer if needed
-    if (mFrameCount < mNormalFrameCount) {
+    // initialize fast mixer depending on configuration
+    bool initFastMixer;
+    switch (kUseFastMixer) {
+    case FastMixer_Never:
+        initFastMixer = false;
+        break;
+    case FastMixer_Always:
+        initFastMixer = true;
+        break;
+    case FastMixer_Static:
+    case FastMixer_Dynamic:
+        initFastMixer = mFrameCount < mNormalFrameCount;
+        break;
+    }
+    if (initFastMixer) {
 
         // create a MonoPipe to connect our submix to FastMixer
         NBAIO_Format format = mOutputSink->format();
@@ -2149,7 +2205,19 @@
     } else {
         mFastMixer = NULL;
     }
-    mNormalSink = mOutputSink;
+
+    switch (kUseFastMixer) {
+    case FastMixer_Never:
+    case FastMixer_Dynamic:
+        mNormalSink = mOutputSink;
+        break;
+    case FastMixer_Always:
+        mNormalSink = mPipeSink;
+        break;
+    case FastMixer_Static:
+        mNormalSink = initFastMixer ? mPipeSink : mOutputSink;
+        break;
+    }
 }
 
 AudioFlinger::MixerThread::~MixerThread()
@@ -2374,7 +2442,8 @@
                     ALOGV("%s waking up", myName.string());
                     acquireWakeLock_l();
 
-                    mPrevMixerStatus = MIXER_IDLE;
+                    mMixerStatus = MIXER_IDLE;
+                    mMixerStatusIgnoringFastTracks = MIXER_IDLE;
 
                     checkSilentMode_l();
 
@@ -2388,11 +2457,8 @@
                 }
             }
 
-            mixer_state newMixerStatus = prepareTracks_l(&tracksToRemove);
-            // Shift in the new status; this could be a queue if it's
-            // useful to filter the mixer status over several cycles.
-            mPrevMixerStatus = mMixerStatus;
-            mMixerStatus = newMixerStatus;
+            // mMixerStatusIgnoringFastTracks is also updated internally
+            mMixerStatus = prepareTracks_l(&tracksToRemove);
 
             // prevent any changes in effect chain list and in each effect chain
             // during mixing and effect process as the audio buffers could be deleted
@@ -2484,98 +2550,9 @@
     return false;
 }
 
-// FIXME This method needs a better name.
-// It pushes a new fast mixer state and returns (via tracksToRemove) a set of tracks to remove.
+// returns (via tracksToRemove) a set of tracks to remove.
 void AudioFlinger::MixerThread::threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove)
 {
-    // were any of the removed tracks also fast tracks?
-    unsigned removedMask = 0;
-    for (size_t i = 0; i < tracksToRemove.size(); ++i) {
-        if (tracksToRemove[i]->isFastTrack()) {
-            int j = tracksToRemove[i]->mFastIndex;
-            ALOG_ASSERT(0 < j && j < FastMixerState::kMaxFastTracks);
-            removedMask |= 1 << j;
-        }
-    }
-    Track* newArray[FastMixerState::kMaxFastTracks];
-    unsigned newMask;
-    {
-        AutoMutex _l(mLock);
-        mFastTrackAvailMask |= removedMask;
-        newMask = mFastTrackNewMask;
-        if (newMask) {
-            mFastTrackNewMask = 0;
-            memcpy(newArray, mFastTrackNewArray, sizeof(mFastTrackNewArray));
-#if !LOG_NDEBUG
-            memset(mFastTrackNewArray, 0, sizeof(mFastTrackNewArray));
-#endif
-        }
-    }
-    unsigned changedMask = newMask | removedMask;
-    // are there any newly added or removed fast tracks?
-    if (changedMask) {
-
-        // This assert would be incorrect because it's theoretically possible (though unlikely)
-        // for a track to be created and then removed within the same normal mix cycle:
-        //    ALOG_ASSERT(!(newMask & removedMask));
-        // The converse, of removing a track and then creating a new track at the identical slot
-        // within the same normal mix cycle, is impossible because the slot isn't marked available.
-
-        // prepare a new state to push
-        FastMixerStateQueue *sq = mFastMixer->sq();
-        FastMixerState *state = sq->begin();
-        FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED;
-        while (changedMask) {
-            int j = __builtin_ctz(changedMask);
-            ALOG_ASSERT(0 < j && j < FastMixerState::kMaxFastTracks);
-            changedMask &= ~(1 << j);
-            FastTrack *fastTrack = &state->mFastTracks[j];
-            // must first do new tracks, then removed tracks, in case same track in both
-            if (newMask & (1 << j)) {
-                ALOG_ASSERT(!(state->mTrackMask & (1 << j)));
-                ALOG_ASSERT(fastTrack->mBufferProvider == NULL &&
-                        fastTrack->mVolumeProvider == NULL);
-                Track *track = newArray[j];
-                AudioBufferProvider *abp = track;
-                VolumeProvider *vp = track;
-                fastTrack->mBufferProvider = abp;
-                fastTrack->mVolumeProvider = vp;
-                fastTrack->mSampleRate = track->mSampleRate;
-                fastTrack->mChannelMask = track->mChannelMask;
-                state->mTrackMask |= 1 << j;
-            }
-            if (removedMask & (1 << j)) {
-                ALOG_ASSERT(state->mTrackMask & (1 << j));
-                ALOG_ASSERT(fastTrack->mBufferProvider != NULL &&
-                        fastTrack->mVolumeProvider != NULL);
-                fastTrack->mBufferProvider = NULL;
-                fastTrack->mVolumeProvider = NULL;
-                fastTrack->mSampleRate = mSampleRate;
-                fastTrack->mChannelMask = AUDIO_CHANNEL_OUT_STEREO;
-                state->mTrackMask &= ~(1 << j);
-            }
-            fastTrack->mGeneration++;
-        }
-        state->mFastTracksGen++;
-        // if the fast mixer was active, but now there are no fast tracks, then put it in cold idle
-        if (state->mCommand == FastMixerState::MIX_WRITE && state->mTrackMask <= 1) {
-            state->mCommand = FastMixerState::COLD_IDLE;
-            state->mColdFutexAddr = &mFastMixerFutex;
-            state->mColdGen++;
-            mFastMixerFutex = 0;
-            mNormalSink = mOutputSink;
-            block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
-        }
-        sq->end();
-        // If any fast tracks were removed, we must wait for acknowledgement
-        // because we're about to decrement the last sp<> on those tracks.
-        // Similarly if we put it into cold idle, need to wait for acknowledgement
-        // so that it stops doing I/O.
-        if (removedMask) {
-            block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
-        }
-        sq->push(block);
-    }
     PlaybackThread::threadLoop_removeTracks(tracksToRemove);
 }
 
@@ -2586,7 +2563,8 @@
     if (mFastMixer != NULL) {
         FastMixerStateQueue *sq = mFastMixer->sq();
         FastMixerState *state = sq->begin();
-        if (state->mCommand != FastMixerState::MIX_WRITE && state->mTrackMask > 1) {
+        if (state->mCommand != FastMixerState::MIX_WRITE &&
+                (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)) {
             if (state->mCommand == FastMixerState::COLD_IDLE) {
                 int32_t old = android_atomic_inc(&mFastMixerFutex);
                 if (old == -1) {
@@ -2596,7 +2574,9 @@
             state->mCommand = FastMixerState::MIX_WRITE;
             sq->end();
             sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
-            mNormalSink = mPipeSink;
+            if (kUseFastMixer == FastMixer_Dynamic) {
+                mNormalSink = mPipeSink;
+            }
         } else {
             sq->end(false /*didModify*/);
         }
@@ -2610,26 +2590,15 @@
     // FIXME rewrite to reduce number of system calls
     mLastWriteTime = systemTime();
     mInWrite = true;
-    int bytesWritten;
 
-    // If an NBAIO sink is present, use it to write the normal mixer's submix
-    if (mNormalSink != 0) {
 #define mBitShift 2 // FIXME
-        size_t count = mixBufferSize >> mBitShift;
-        ssize_t framesWritten = mNormalSink->write(mMixBuffer, count);
-        if (framesWritten > 0) {
-            bytesWritten = framesWritten << mBitShift;
-        } else {
-            bytesWritten = framesWritten;
-        }
-
-    // otherwise use the HAL / AudioStreamOut directly
-    } else {
-        // FIXME legacy, remove
-        bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
+    size_t count = mixBufferSize >> mBitShift;
+    ssize_t framesWritten = mNormalSink->write(mMixBuffer, count);
+    if (framesWritten > 0) {
+        size_t bytesWritten = framesWritten << mBitShift;
+        mBytesWritten += bytesWritten;
     }
 
-    if (bytesWritten > 0) mBytesWritten += mixBufferSize;
     mNumWrites++;
     mInWrite = false;
 }
@@ -2648,7 +2617,9 @@
             sq->end();
             // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now
             sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
-            mNormalSink = mOutputSink;
+            if (kUseFastMixer == FastMixer_Dynamic) {
+                mNormalSink = mOutputSink;
+            }
         } else {
             sq->end(false /*didModify*/);
         }
@@ -2731,7 +2702,9 @@
     size_t count = mActiveTracks.size();
     size_t mixedTracks = 0;
     size_t tracksWithEffect = 0;
+    // counts only _active_ fast tracks
     size_t fastTracks = 0;
+    uint32_t resetMask = 0; // bit mask of fast tracks that need to be reset
 
     float masterVolume = mMasterVolume;
     bool masterMute = mMasterMute;
@@ -2748,6 +2721,16 @@
         chain.clear();
     }
 
+    // prepare a new state to push
+    FastMixerStateQueue *sq = NULL;
+    FastMixerState *state = NULL;
+    bool didModify = false;
+    FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED;
+    if (mFastMixer != NULL) {
+        sq = mFastMixer->sq();
+        state = sq->begin();
+    }
+
     for (size_t i=0 ; i<count ; i++) {
         sp<Track> t = mActiveTracks[i].promote();
         if (t == 0) continue;
@@ -2755,13 +2738,98 @@
         // this const just means the local variable doesn't change
         Track* const track = t.get();
 
+        // process fast tracks
         if (track->isFastTrack()) {
-            // cache the combined master volume and stream type volume for fast mixer;
-            // this lacks any synchronization or barrier so VolumeProvider may read a stale value
-            track->mCachedVolume = masterVolume * mStreamTypes[track->streamType()].volume;
-            ++fastTracks;
-            if (track->isTerminated()) {
-                tracksToRemove->add(track);
+
+            // It's theoretically possible (though unlikely) for a fast track to be created
+            // and then removed within the same normal mix cycle.  This is not a problem, as
+            // the track never becomes active so its fast mixer slot is never touched.
+            // The converse, of removing an (active) track and then creating a new track
+            // at the identical fast mixer slot within the same normal mix cycle,
+            // is impossible because the slot isn't marked available until the end of each cycle.
+            int j = track->mFastIndex;
+            FastTrack *fastTrack = &state->mFastTracks[j];
+
+            // Determine whether the track is currently in underrun condition,
+            // and whether it had a recent underrun.
+            uint32_t underruns = mFastMixerDumpState.mTracks[j].mUnderruns;
+            uint32_t recentUnderruns = (underruns - (track->mObservedUnderruns & ~1)) >> 1;
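+            // mUnderruns packs the cumulative underrun count in bits 1..31 and an
+            // "in underrun" flag in bit 0; subtracting the previously observed count and
+            // shifting right by one gives the number of underruns new since last observation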
+            // don't count underruns that occur while stopping or pausing
+            if (!(track->isStopped() || track->isPausing())) {
+                track->mUnderrunCount += recentUnderruns;
+            }
+            track->mObservedUnderruns = underruns;
+
+            // This is similar to the formula for normal tracks,
+            // with a few modifications for fast tracks.
+            bool isActive;
+            if (track->isStopped()) {
+                // track stays active after stop() until first underrun
+                isActive = recentUnderruns == 0;
+            } else if (track->isPaused() || track->isTerminated()) {
+                isActive = false;
+            } else if (track->isPausing()) {
+                // ramp down is not yet implemented
+                isActive = true;
+                track->setPaused();
+            } else if (track->isResuming()) {
+                // ramp up is not yet implemented
+                isActive = true;
+                track->mState = TrackBase::ACTIVE;
+            } else {
+                // no minimum frame count for fast tracks; continual underrun is allowed,
+                // but later could implement automatic pause after several consecutive underruns,
+                // or auto-mute yet still consider the track active and continue to service it
+                isActive = true;
+            }
+
+            if (isActive) {
+                // was it previously inactive?
+                if (!(state->mTrackMask & (1 << j))) {
+                    ExtendedAudioBufferProvider *eabp = track;
+                    VolumeProvider *vp = track;
+                    fastTrack->mBufferProvider = eabp;
+                    fastTrack->mVolumeProvider = vp;
+                    fastTrack->mSampleRate = track->mSampleRate;
+                    fastTrack->mChannelMask = track->mChannelMask;
+                    fastTrack->mGeneration++;
+                    state->mTrackMask |= 1 << j;
+                    didModify = true;
+                    // no acknowledgement required for newly active tracks
+                }
+                // cache the combined master volume and stream type volume for fast mixer; this
+                // lacks any synchronization or barrier so VolumeProvider may read a stale value
+                track->mCachedVolume = track->isMuted() ?
+                        0 : masterVolume * mStreamTypes[track->streamType()].volume;
+                ++fastTracks;
+            } else {
+                // was it previously active?
+                if (state->mTrackMask & (1 << j)) {
+                    fastTrack->mBufferProvider = NULL;
+                    fastTrack->mGeneration++;
+                    state->mTrackMask &= ~(1 << j);
+                    didModify = true;
+                    // If any fast tracks were removed, we must wait for acknowledgement
+                    // because we're about to decrement the last sp<> on those tracks.
+                    block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
+                }
+                // Remainder of this block is copied from similar code for normal tracks
+                if (track->isStopped()) {
+                    // Can't reset directly, as fast mixer is still polling this track
+                    //   track->reset();
+                    // So instead mark this track as needing to be reset after push with ack
+                    resetMask |= 1 << i;
+                }
+                // This would be incomplete if we auto-paused on underrun
+                size_t audioHALFrames =
+                        (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+                size_t framesWritten =
+                        mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
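+                // get_latency() reports the HAL latency in ms; convert it and the total bytes
+                // written so far to frames so that presentationComplete() can compare them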
+                if (track->presentationComplete(framesWritten, audioHALFrames)) {
+                    tracksToRemove->add(track);
+                }
+                // Avoids a misleading display in dumpsys
+                track->mObservedUnderruns &= ~1;
             }
             continue;
         }
@@ -2776,11 +2844,11 @@
         // make sure that we have enough frames to mix one full buffer.
         // enforce this condition only once to enable draining the buffer in case the client
         // app does not call stop() and relies on underrun to stop:
-        // hence the test on (mPrevMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed
+        // hence the test on (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY) meaning
+        // the track was mixed
         // during last round
         uint32_t minFrames = 1;
-        if (!track->isStopped() && !track->isPausing() &&
-                (mPrevMixerStatus == MIXER_TRACKS_READY)) {
+        if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
+                (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
             if (t->sampleRate() == (int)mSampleRate) {
                 minFrames = mNormalFrameCount;
             } else {
@@ -2929,7 +2997,7 @@
             // If one track is ready, set the mixer ready if:
             //  - the mixer was not ready during previous round OR
             //  - no other track is not ready
-            if (mPrevMixerStatus != MIXER_TRACKS_READY ||
+            if (mMixerStatusIgnoringFastTracks != MIXER_TRACKS_READY ||
                     mixerStatus != MIXER_TRACKS_ENABLED) {
                 mixerStatus = MIXER_TRACKS_READY;
             }
@@ -2938,7 +3006,8 @@
             if (track->isStopped()) {
                 track->reset();
             }
-            if (track->isTerminated() || track->isStopped() || track->isPaused()) {
+            if ((track->sharedBuffer() != 0) || track->isTerminated() ||
+                    track->isStopped() || track->isPaused()) {
                 // We have consumed all the buffers of this track.
                 // Remove it from the list of active tracks.
                 // TODO: use actual buffer filling status instead of latency when available from
@@ -2956,12 +3025,13 @@
                 if (--(track->mRetryCount) <= 0) {
                     ALOGV("BUFFER TIMEOUT: remove(%d) from active list on thread %p", name, this);
                     tracksToRemove->add(track);
-                    // indicate to client process that the track was disabled because of underrun
+                    // indicate to client process that the track was disabled because of underrun;
+                    // it will then automatically call start() when data is available
                     android_atomic_or(CBLK_DISABLED_ON, &cblk->flags);
                 // If one track is not ready, mark the mixer also not ready if:
                 //  - the mixer was ready during previous round OR
                 //  - no other track is ready
-                } else if (mPrevMixerStatus == MIXER_TRACKS_READY ||
+                } else if (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY ||
                                 mixerStatus != MIXER_TRACKS_READY) {
                     mixerStatus = MIXER_TRACKS_ENABLED;
                 }
@@ -2974,7 +3044,41 @@
 
     }
 
-    // FIXME Here is where we would push the new FastMixer state if necessary
+    // Push the new FastMixer state if necessary
+    if (didModify) {
+        state->mFastTracksGen++;
+        // if the fast mixer was active, but now there are no fast tracks, then put it in cold idle
+        if (kUseFastMixer == FastMixer_Dynamic &&
+                state->mCommand == FastMixerState::MIX_WRITE && state->mTrackMask <= 1) {
+            state->mCommand = FastMixerState::COLD_IDLE;
+            state->mColdFutexAddr = &mFastMixerFutex;
+            state->mColdGen++;
+            mFastMixerFutex = 0;
+            if (kUseFastMixer == FastMixer_Dynamic) {
+                mNormalSink = mOutputSink;
+            }
+            // If we go into cold idle, need to wait for acknowledgement
+            // so that fast mixer stops doing I/O.
+            block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
+        }
+    }
+    if (sq != NULL) {
+        sq->end(didModify);
+        sq->push(block);
+    }
+
+    // Now perform the deferred reset on fast tracks that have stopped
+    while (resetMask != 0) {
+        size_t i = __builtin_ctz(resetMask);
+        ALOG_ASSERT(i < count);
+        resetMask &= ~(1 << i);
+        sp<Track> t = mActiveTracks[i].promote();
+        if (t == 0) continue;
+        Track* track = t.get();
+        ALOG_ASSERT(track->isFastTrack() && track->isStopped());
+        track->reset();
+    }
 
     // remove all the tracks that need to be...
     count = tracksToRemove->size();
@@ -3004,6 +3108,7 @@
     }
 
     // if any fast tracks, then status is ready
+    mMixerStatusIgnoringFastTracks = mixerStatus;
     if (fastTracks > 0) {
         mixerStatus = MIXER_TRACKS_READY;
     }
@@ -3887,6 +3992,7 @@
 {
     buffer->raw = NULL;
     mFrameCount = buffer->frameCount;
+    // FIXME See note at getNextBuffer()
     (void) step();      // ignore return value of step()
     buffer->frameCount = 0;
 }
@@ -3968,6 +4074,8 @@
     mPresentationCompleteFrames(0),
     mFlags(flags),
     mFastIndex(-1),
+    mObservedUnderruns(0),
+    mUnderrunCount(0),
     mCachedVolume(1.0)
 {
     if (mCblk != NULL) {
@@ -3979,18 +4087,22 @@
             ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
             int i = __builtin_ctz(thread->mFastTrackAvailMask);
             ALOG_ASSERT(0 < i && i < FastMixerState::kMaxFastTracks);
+            // FIXME This is too eager.  We allocate a fast track index before the
+            //       fast track becomes active.  Since fast tracks are a scarce resource,
+            //       this means we are potentially denying other more important fast tracks from
+            //       being created.  It would be better to allocate the index dynamically.
             mFastIndex = i;
+            // Read the initial underruns because this field is never cleared by the fast mixer
+            mObservedUnderruns = thread->getFastTrackUnderruns(i) & ~1;
             thread->mFastTrackAvailMask &= ~(1 << i);
-            // Although we've allocated an index, we can't mutate or push a new fast track state
-            // here, because that data structure can only be changed within the normal mixer
-            // threadLoop().  So instead, make a note to mutate and push later.
-            thread->mFastTrackNewArray[i] = this;
-            thread->mFastTrackNewMask |= 1 << i;
         }
         // to avoid leaking a track name, do not allocate one unless there is an mCblk
         mName = thread->getTrackName_l((audio_channel_mask_t)channelMask);
         if (mName < 0) {
             ALOGE("no more track names available");
+            // FIXME bug - if sufficient fast track indices, but insufficient normal mixer names,
+            // then we leak a fast track index.  Should swap these two sections, or better yet
+            // only allocate a normal mixer name for normal tracks.
         }
     }
     ALOGV("Track constructor name %d, calling pid %d", mName, IPCThreadState::self()->getCallingPid());
@@ -4038,22 +4150,59 @@
     }
 }
 
+/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
+{
+    result.append("   Name Client Type Fmt Chn mask   Session Frames S M F SRate  L dB  R dB  "
+                  "  Server      User     Main buf    Aux Buf  FastUnder\n");
+
+}
+
 void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
 {
     uint32_t vlr = mCblk->getVolumeLR();
     if (isFastTrack()) {
-        strcpy(buffer, "   fast");
+        sprintf(buffer, "   F %2d", mFastIndex);
     } else {
         sprintf(buffer, "   %4d", mName - AudioMixer::TRACK0);
     }
-    snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %1d %1d %1d %5u %5.2g %5.2g  0x%08x 0x%08x 0x%08x 0x%08x\n",
+    track_state state = mState;
+    char stateChar;
+    switch (state) {
+    case IDLE:
+        stateChar = 'I';
+        break;
+    case TERMINATED:
+        stateChar = 'T';
+        break;
+    case STOPPED:
+        stateChar = 'S';
+        break;
+    case RESUMING:
+        stateChar = 'R';
+        break;
+    case ACTIVE:
+        stateChar = 'A';
+        break;
+    case PAUSING:
+        stateChar = 'p';
+        break;
+    case PAUSED:
+        stateChar = 'P';
+        break;
+    default:
+        stateChar = '?';
+        break;
+    }
+    bool nowInUnderrun = mObservedUnderruns & 1;
+    snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %1c %1d %1d %5u %5.2g %5.2g  "
+            "0x%08x 0x%08x 0x%08x 0x%08x %9u%c\n",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mStreamType,
             mFormat,
             mChannelMask,
             mSessionId,
             mFrameCount,
-            mState,
+            stateChar,
             mMute,
             mFillingUpStatus,
             mCblk->sampleRate,
@@ -4062,7 +4211,9 @@
             mCblk->server,
             mCblk->user,
             (int)mMainBuffer,
-            (int)mAuxBuffer);
+            (int)mAuxBuffer,
+            mUnderrunCount,
+            nowInUnderrun ? '*' : ' ');
 }
 
 // AudioBufferProvider interface
@@ -4075,11 +4226,19 @@
 
     // Check if last stepServer failed, try to step now
     if (mStepServerFailed) {
+        // FIXME When called by fast mixer, this takes a mutex with tryLock().
+        //       Since the fast mixer is higher priority than client callback thread,
+        //       it does not result in priority inversion for client.
+        //       But a non-blocking solution would be preferable to avoid
+        //       fast mixer being unable to tryLock(), and
+        //       to avoid the extra context switches if the client wakes up,
+        //       discovers the mutex is locked, then has to wait for fast mixer to unlock.
         if (!step())  goto getNextBuffer_exit;
         ALOGV("stepServer recovered");
         mStepServerFailed = false;
     }
 
+    // FIXME Same as above
     framesReady = cblk->framesReady();
 
     if (CC_LIKELY(framesReady)) {
@@ -4108,10 +4267,19 @@
     return NOT_ENOUGH_DATA;
 }
 
-uint32_t AudioFlinger::PlaybackThread::Track::framesReady() const {
+// Note that framesReady() takes a mutex on the control block using tryLock().
+// This could result in priority inversion if framesReady() is called by the normal mixer,
+// as the normal mixer thread runs at lower
+// priority than the client's callback thread:  there is a short window within framesReady()
+// during which the normal mixer could be preempted, and the client callback would block.
+// Another problem can occur if framesReady() is called by the fast mixer:
+// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
+// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
+size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
     return mCblk->framesReady();
 }
 
+// Don't call for fast tracks; the framesReady() could result in priority inversion
 bool AudioFlinger::PlaybackThread::Track::isReady() const {
     if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) return true;
 
@@ -4842,7 +5010,7 @@
     buffer->frameCount = 0;
 }
 
-uint32_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
+size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
     Mutex::Autolock _l(mTimedBufferQueueLock);
     return mFramesPendingInQueue;
 }
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 23fc74d..f10295f 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -46,6 +46,7 @@
 #include <hardware/audio_policy.h>
 
 #include "AudioBufferProvider.h"
+#include "ExtendedAudioBufferProvider.h"
 #include "FastMixer.h"
 #include "NBAIO.h"
 
@@ -355,7 +356,7 @@
         void clearPowerManager();
 
         // base for record and playback
-        class TrackBase : public AudioBufferProvider, public RefBase {
+        class TrackBase : public ExtendedAudioBufferProvider, public RefBase {
 
         public:
             enum track_state {
@@ -396,6 +397,10 @@
             virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) = 0;
             virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
 
+            // ExtendedAudioBufferProvider interface is only needed for Track,
+            // but putting it in TrackBase avoids the complexity of virtual inheritance
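+            // The default implementation reports an unlimited number of frames ready;
+            // Track and TimedTrack override it with the real count.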
+            virtual size_t  framesReady() const { return SIZE_MAX; }
+
             audio_format_t format() const {
                 return mFormat;
             }
@@ -676,6 +681,7 @@
                                         IAudioFlinger::track_flags_t flags);
             virtual             ~Track();
 
+            static  void        appendDumpHeader(String8& result);
                     void        dump(char* buffer, size_t size);
             virtual status_t    start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
                                      int triggerSession = 0);
@@ -699,11 +705,6 @@
                     int16_t     *mainBuffer() const { return mMainBuffer; }
                     int         auxEffectId() const { return mAuxEffectId; }
 
-#if 0
-                    bool        isFastTrack() const
-                            { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; }
-#endif
-
         // implement FastMixerState::VolumeProvider interface
             virtual uint32_t    getVolumeLR();
 
@@ -720,7 +721,7 @@
             virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts = kInvalidPTS);
             // releaseBuffer() not overridden
 
-            virtual uint32_t framesReady() const;
+            virtual size_t framesReady() const;
 
             bool isMuted() const { return mMute; }
             bool isPausing() const {
@@ -729,6 +730,9 @@
             bool isPaused() const {
                 return mState == PAUSED;
             }
+            bool isResuming() const {
+                return mState == RESUMING;
+            }
             bool isReady() const;
             void setPaused() { mState = PAUSED; }
             void reset();
@@ -737,6 +741,8 @@
                 return (mStreamType == AUDIO_STREAM_CNT);
             }
 
+            sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
+
             bool presentationComplete(size_t framesWritten, size_t audioHalFrames);
             void triggerEvents(AudioSystem::sync_event_t type);
 
@@ -754,7 +760,9 @@
             const sp<IMemory>   mSharedBuffer;
             bool                mResetDone;
             const audio_stream_type_t mStreamType;
-            int                 mName;      // track name on the normal mixer
+            int                 mName;      // track name on the normal mixer,
+                                            // allocated statically at track creation time,
+                                            // and is even allocated (though unused) for fast tracks
             int16_t             *mMainBuffer;
             int32_t             *mAuxBuffer;
             int                 mAuxEffectId;
@@ -763,7 +771,17 @@
                                                        // when this track will be fully rendered
         private:
             IAudioFlinger::track_flags_t mFlags;
-            int                 mFastIndex; // index within FastMixerState::mFastTracks[] or -1
+
+            // The following fields are only for fast tracks, and should be in a subclass
+            int                 mFastIndex; // index within FastMixerState::mFastTracks[];
+                                            // either mFastIndex == -1
+                                            // or 0 < mFastIndex < FastMixerState::kMaxFastTracks because
+                                            // index 0 is reserved for normal mixer's submix;
+                                            // index is allocated statically at track creation time
+                                            // but the slot is only used if track is active
+            uint32_t            mObservedUnderruns; // Most recently observed value of
+                                            // mFastMixerDumpState.mTracks[mFastIndex].mUnderruns
+            uint32_t            mUnderrunCount; // Counter of total number of underruns, never reset
             volatile float      mCachedVolume;  // combined master volume and stream type volume;
                                                 // 'volatile' means accessed without lock or
                                                 // barrier, but is read/written atomically
@@ -798,7 +816,7 @@
 
             // Mixer facing methods.
             virtual bool isTimedTrack() const { return true; }
-            virtual uint32_t framesReady() const;
+            virtual size_t framesReady() const;
 
             // AudioBufferProvider interface
             virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
@@ -919,9 +937,9 @@
         virtual     void        threadLoop_standby();
         virtual     void        threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove) { }
 
-                    // prepareTracks_l reads and writes mActiveTracks, and also returns the
-                    // pending set of tracks to remove via Vector 'tracksToRemove'.  The caller is
-                    // responsible for clearing or destroying this Vector later on, when it
+                    // prepareTracks_l reads and writes mActiveTracks, and returns
+                    // the pending set of tracks to remove via Vector 'tracksToRemove'.  The caller
+                    // is responsible for clearing or destroying this Vector later on, when it
                     // is safe to do so. That will drop the final ref count and destroy the tracks.
         virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
 
@@ -991,7 +1009,7 @@
         bool                            mMasterMute;
                     void        setMasterMute_l(bool muted) { mMasterMute = muted; }
     protected:
-        SortedVector< wp<Track> >       mActiveTracks;
+        SortedVector< wp<Track> >       mActiveTracks;  // FIXME check if this could be sp<>
 
         // Allocate a track name for a given channel mask.
         //   Returns name >= 0 if successful, -1 on failure.
@@ -1052,8 +1070,10 @@
         uint32_t                        sleepTime;
 
         // mixer status returned by prepareTracks_l()
-        mixer_state                     mMixerStatus;       // current cycle
-        mixer_state                     mPrevMixerStatus;   // previous cycle
+        mixer_state                     mMixerStatus; // current cycle
+                                                      // previous cycle when in prepareTracks_l()
+        mixer_state                     mMixerStatusIgnoringFastTracks;
+                                                      // FIXME or a separate ready state per track
 
         // FIXME move these declarations into the specific sub-class that needs them
         // MIXER only
@@ -1078,12 +1098,11 @@
         sp<NBAIO_Sink>          mNormalSink;
     public:
         virtual     bool        hasFastMixer() const = 0;
+        virtual     uint32_t    getFastTrackUnderruns(size_t fastIndex) const { return 0; }
 
     protected:
                     // accessed by both binder threads and within threadLoop(), lock on mutex needed
                     unsigned    mFastTrackAvailMask;    // bit i set if fast track [i] is available
-                    unsigned    mFastTrackNewMask;      // bit i set if fast track [i] just created
-                    Track*      mFastTrackNewArray[FastMixerState::kMaxFastTracks];
 
     };
 
@@ -1134,6 +1153,11 @@
 
     public:
         virtual     bool        hasFastMixer() const { return mFastMixer != NULL; }
+        virtual     uint32_t    getFastTrackUnderruns(size_t fastIndex) const {
+                                    ALOG_ASSERT(0 < fastIndex &&
+                                            fastIndex < FastMixerState::kMaxFastTracks);
+                                    return mFastMixerDumpState.mTracks[fastIndex].mUnderruns;
+                                }
     };
 
     class DirectOutputThread : public PlaybackThread {
diff --git a/services/audioflinger/ExtendedAudioBufferProvider.h b/services/audioflinger/ExtendedAudioBufferProvider.h
new file mode 100644
index 0000000..88279b4
--- /dev/null
+++ b/services/audioflinger/ExtendedAudioBufferProvider.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_EXTENDED_AUDIO_BUFFER_PROVIDER_H
+#define ANDROID_EXTENDED_AUDIO_BUFFER_PROVIDER_H
+
+#include "AudioBufferProvider.h"
+
+namespace android {
+
+class ExtendedAudioBufferProvider : public AudioBufferProvider {
+public:
+    virtual size_t  framesReady() const = 0;  // see description at AudioFlinger.h
+};
+
+}   // namespace android
+
+#endif  // ANDROID_EXTENDED_AUDIO_BUFFER_PROVIDER_H
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 841b06a..bf264be 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -29,6 +29,7 @@
 
 #define FAST_HOT_IDLE_NS     1000000L   // 1 ms: time to sleep while hot idling
 #define FAST_DEFAULT_NS    999999999L   // ~1 sec: default time to sleep
+#define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup
 
 namespace android {
 
@@ -58,8 +59,9 @@
     unsigned sampleRate = 0;
     int fastTracksGen = 0;
     long periodNs = 0;      // expected period; the time required to render one mix buffer
-    long underrunNs = 0;    // an underrun is likely if an actual cycle is greater than this value
-    long overrunNs = 0;     // an overrun is likely if an actual cycle if less than this value
+    long underrunNs = 0;    // underrun likely when write cycle is greater than this value
+    long overrunNs = 0;     // overrun likely when write cycle is less than this value
+    long warmupNs = 0;      // warmup complete when write cycle is greater than this value
     FastMixerDumpState dummyDumpState, *dumpState = &dummyDumpState;
     bool ignoreNextOverrun = true;  // used to ignore initial overrun and first after an underrun
 #ifdef FAST_MIXER_STATISTICS
@@ -67,6 +69,9 @@
     static const unsigned kMaxSamples = 1000;
 #endif
     unsigned coldGen = 0;   // last observed mColdGen
+    bool isWarm = false;    // true means ready to mix, false means wait for warmup before mixing
+    struct timespec measuredWarmupTs = {0, 0};  // how long did it take for warmup to complete
+    uint32_t warmupCycles = 0;  // counter of number of loop cycles required to warmup
 
     for (;;) {
 
@@ -138,6 +143,12 @@
                 if (old <= 0) {
                     __futex_syscall4(coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
                 }
+                // This may be overly conservative; there could be times that the normal mixer
+                // requests such a brief cold idle that it doesn't require resetting this flag.
+                isWarm = false;
+                measuredWarmupTs.tv_sec = 0;
+                measuredWarmupTs.tv_nsec = 0;
+                warmupCycles = 0;
                 sleepNs = -1;
                 coldGen = current->mColdGen;
             } else {
@@ -195,6 +206,7 @@
                     periodNs = (frameCount * 1000000000LL) / sampleRate;    // 1.00
                     underrunNs = (frameCount * 1750000000LL) / sampleRate;  // 1.75
                     overrunNs = (frameCount * 250000000LL) / sampleRate;    // 0.25
+                    warmupNs = (frameCount * 500000000LL) / sampleRate;     // 0.50
                 } else {
                     periodNs = 0;
                     underrunNs = 0;
@@ -226,6 +238,7 @@
                     i = __builtin_ctz(removedTracks);
                     removedTracks &= ~(1 << i);
                     const FastTrack* fastTrack = &current->mFastTracks[i];
+                    ALOG_ASSERT(fastTrack->mBufferProvider == NULL);
                     if (mixer != NULL) {
                         name = fastTrackNames[i];
                         ALOG_ASSERT(name >= 0);
@@ -234,6 +247,7 @@
 #if !LOG_NDEBUG
                     fastTrackNames[i] = -1;
 #endif
+                    // don't reset track dump state, since other side is ignoring it
                     generations[i] = fastTrack->mGeneration;
                 }
 
@@ -313,13 +327,13 @@
         }
 
         // do work using current state here
-        if ((command & FastMixerState::MIX) && (mixer != NULL)) {
+        if ((command & FastMixerState::MIX) && (mixer != NULL) && isWarm) {
             ALOG_ASSERT(mixBuffer != NULL);
-            // update volumes
-            unsigned volumeTracks = current->mTrackMask;
-            while (volumeTracks != 0) {
-                i = __builtin_ctz(volumeTracks);
-                volumeTracks &= ~(1 << i);
+            // for each track, update volume and check for underrun
+            unsigned currentTrackMask = current->mTrackMask;
+            while (currentTrackMask != 0) {
+                i = __builtin_ctz(currentTrackMask);
+                currentTrackMask &= ~(1 << i);
                 const FastTrack* fastTrack = &current->mFastTracks[i];
                 int name = fastTrackNames[i];
                 ALOG_ASSERT(name >= 0);
@@ -330,6 +344,25 @@
                     mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1,
                             (void *)(vlr >> 16));
                 }
+                // FIXME The current implementation of framesReady() for fast tracks
+                // takes a tryLock, which can block
+                // up to 1 ms.  If enough active tracks all blocked in sequence, this would result
+                // in the overall fast mix cycle being delayed.  Should use a non-blocking FIFO.
+                size_t framesReady = fastTrack->mBufferProvider->framesReady();
+                FastTrackDump *ftDump = &dumpState->mTracks[i];
+                uint32_t underruns = ftDump->mUnderruns;
+                if (framesReady < frameCount) {
+                    ftDump->mUnderruns = (underruns + 2) | 1;
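+                    // adding 2 increments the count kept in bits 1..31, and OR-ing 1 sets
+                    // the "currently in underrun" flag in bit 0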
+                    if (framesReady == 0) {
+                        mixer->disable(name);
+                    } else {
+                        // allow mixing partial buffer
+                        mixer->enable(name);
+                    }
+                } else if (underruns & 1) {
+                    ftDump->mUnderruns = underruns & ~1;
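+                    // a full buffer is available again: clear the in-underrun flag in bit 0
+                    // while preserving the cumulative count in bits 1..31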
+                    mixer->enable(name);
+                }
             }
             // process() is CPU-bound
             mixer->process(AudioBufferProvider::kInvalidPTS);
@@ -337,6 +370,8 @@
         } else if (mixBufferState == MIXED) {
             mixBufferState = UNDEFINED;
         }
+        bool attemptedWrite = false;
+        //bool didFullWrite = false;    // dumpsys could display a count of partial writes
         if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mixBuffer != NULL)) {
             if (mixBufferState == UNDEFINED) {
                 memset(mixBuffer, 0, frameCount * 2 * sizeof(short));
@@ -348,10 +383,15 @@
             ssize_t framesWritten = outputSink->write(mixBuffer, frameCount);
             dumpState->mWriteSequence++;
             if (framesWritten >= 0) {
+                ALOG_ASSERT(framesWritten <= frameCount);
                 dumpState->mFramesWritten += framesWritten;
+                //if ((size_t) framesWritten == frameCount) {
+                //    didFullWrite = true;
+                //}
             } else {
                 dumpState->mWriteErrors++;
             }
+            attemptedWrite = true;
             // FIXME count # of writes blocked excessively, CPU usage, etc. for dump
         }
 
@@ -368,6 +408,27 @@
                     --sec;
                     nsec += 1000000000;
                 }
+                // To avoid an initial underrun on fast tracks after exiting standby,
+                // do not start pulling data from tracks and mixing until warmup is complete.
+                // Warmup is considered complete after the earlier of:
+                //      first successful single write() that blocks for more than warmupNs
+                //      MAX_WARMUP_CYCLES write() attempts.
+                // This is overly conservative, but to get better accuracy requires a new HAL API.
+                if (!isWarm && attemptedWrite) {
+                    measuredWarmupTs.tv_sec += sec;
+                    measuredWarmupTs.tv_nsec += nsec;
+                    if (measuredWarmupTs.tv_nsec >= 1000000000) {
+                        measuredWarmupTs.tv_sec++;
+                        measuredWarmupTs.tv_nsec -= 1000000000;
+                    }
+                    ++warmupCycles;
+                    if ((attemptedWrite && nsec > warmupNs) ||
+                            (warmupCycles >= MAX_WARMUP_CYCLES)) {
+                        isWarm = true;
+                        dumpState->mMeasuredWarmupTs = measuredWarmupTs;
+                        dumpState->mWarmupCycles = warmupCycles;
+                    }
+                }
                 if (sec > 0 || nsec > underrunNs) {
                     // FIXME only log occasionally
                     ALOGV("underrun: time since last cycle %d.%03ld sec",
@@ -421,11 +482,13 @@
 FastMixerDumpState::FastMixerDumpState() :
     mCommand(FastMixerState::INITIAL), mWriteSequence(0), mFramesWritten(0),
     mNumTracks(0), mWriteErrors(0), mUnderruns(0), mOverruns(0),
-    mSampleRate(0), mFrameCount(0)
+    mSampleRate(0), mFrameCount(0), /* mMeasuredWarmupTs({0, 0}), */ mWarmupCycles(0)
 #ifdef FAST_MIXER_STATISTICS
     , mMean(0.0), mMinimum(0.0), mMaximum(0.0), mStddev(0.0)
 #endif
 {
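+    // an aggregate member can't be initialized with a braced list in the C++03
+    // mem-initializer list (see the commented-out initializer above), so zero it here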
+    mMeasuredWarmupTs.tv_sec = 0;
+    mMeasuredWarmupTs.tv_nsec = 0;
 }
 
 FastMixerDumpState::~FastMixerDumpState()
@@ -462,12 +525,14 @@
         snprintf(string, COMMAND_MAX, "%d", mCommand);
         break;
     }
+    double mMeasuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
+            (mMeasuredWarmupTs.tv_nsec / 1000000.0);
     fdprintf(fd, "FastMixer command=%s writeSequence=%u framesWritten=%u\n"
                  "          numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
-                 "          sampleRate=%u frameCount=%u\n",
+                 "          sampleRate=%u frameCount=%u measuredWarmup=%.3g ms, warmupCycles=%u\n",
                  string, mWriteSequence, mFramesWritten,
                  mNumTracks, mWriteErrors, mUnderruns, mOverruns,
-                 mSampleRate, mFrameCount);
+                 mSampleRate, mFrameCount, mMeasuredWarmupMs, mWarmupCycles);
 #ifdef FAST_MIXER_STATISTICS
     fdprintf(fd, "          cycle time in ms: mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
                  mMean*1e3, mMinimum*1e3, mMaximum*1e3, mStddev*1e3);
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 8a8fcb8..a6dd310 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -42,8 +42,23 @@
 
 };  // class FastMixer
 
+// Represents the dump state of a fast track
+struct FastTrackDump {
+    FastTrackDump() : mUnderruns(0) { }
+    /*virtual*/ ~FastTrackDump() { }
+    uint32_t mUnderruns;        // Underrun status, represented as follows:
+                                //   bit 0 == 0 means not currently in underrun
+                                //   bit 0 == 1 means currently in underrun
+                                //   bits 1 to 31 == total number of underruns
+                                // Not reset to zero for new tracks or if track generation changes.
+                                // This representation is used to keep the information atomic.
+};
+
 // The FastMixerDumpState keeps a cache of FastMixer statistics that can be logged by dumpsys.
-// Since used non-atomically, only POD types are permitted, and the contents can't be trusted.
+// Each individual native word-sized field is accessed atomically.  But the
+// overall structure is non-atomic; that is, there may be inconsistencies between fields.
+// No barriers or locks are used for either writing or reading.
+// Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks).
 // It has a different lifetime than the FastMixer, and so it can't be a member of FastMixer.
 struct FastMixerDumpState {
     FastMixerDumpState();
@@ -60,6 +75,9 @@
     uint32_t mOverruns;         // total number of overruns
     uint32_t mSampleRate;
     size_t   mFrameCount;
+    struct timespec mMeasuredWarmupTs;  // measured warmup time
+    uint32_t mWarmupCycles;     // number of loop cycles required to warmup
+    FastTrackDump   mTracks[FastMixerState::kMaxFastTracks];
 #ifdef FAST_MIXER_STATISTICS
     // cycle times in seconds
     float    mMean;
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 83094c8..ce0cdb5 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -18,7 +18,7 @@
 #define ANDROID_AUDIO_FAST_MIXER_STATE_H
 
 #include <system/audio.h>
-#include "AudioBufferProvider.h"
+#include "ExtendedAudioBufferProvider.h"
 #include "NBAIO.h"
 
 namespace android {
@@ -40,7 +40,7 @@
     FastTrack();
     /*virtual*/ ~FastTrack();
 
-    AudioBufferProvider*    mBufferProvider; // must not be NULL
+    ExtendedAudioBufferProvider* mBufferProvider; // must be NULL if inactive, or non-NULL if active
     VolumeProvider*         mVolumeProvider; // optional; if NULL then full-scale
     unsigned                mSampleRate;     // optional; if zero then use mixer sample rate
     audio_channel_mask_t    mChannelMask;    // AUDIO_CHANNEL_OUT_MONO or AUDIO_CHANNEL_OUT_STEREO
@@ -57,7 +57,7 @@
     // all pointer fields use raw pointers; objects are owned and ref-counted by the normal mixer
     FastTrack   mFastTracks[kMaxFastTracks];
     int         mFastTracksGen; // increment when any mFastTracks[i].mGeneration is incremented
-    unsigned    mTrackMask;     // bit i is set if and only if mFastTracks[i] != NULL
+    unsigned    mTrackMask;     // bit i is set if and only if mFastTracks[i] is active
     NBAIO_Sink* mOutputSink;    // HAL output device, must already be negotiated
     int         mOutputSinkGen; // increment when mOutputSink is assigned
     size_t      mFrameCount;    // number of frames per fast mix buffer
diff --git a/services/audioflinger/SourceAudioBufferProvider.cpp b/services/audioflinger/SourceAudioBufferProvider.cpp
index e9e8c16..e9d6d2c 100644
--- a/services/audioflinger/SourceAudioBufferProvider.cpp
+++ b/services/audioflinger/SourceAudioBufferProvider.cpp
@@ -95,4 +95,10 @@
     mGetCount = 0;
 }
 
+size_t SourceAudioBufferProvider::framesReady() const
+{
+    ssize_t avail = mSource->availableToRead();
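+    // a negative value from availableToRead() indicates an error; report zero frames ready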
+    return avail < 0 ? 0 : (size_t) avail;
+}
+
 }   // namespace android
diff --git a/services/audioflinger/SourceAudioBufferProvider.h b/services/audioflinger/SourceAudioBufferProvider.h
index 3219d78..85ccbb2 100644
--- a/services/audioflinger/SourceAudioBufferProvider.h
+++ b/services/audioflinger/SourceAudioBufferProvider.h
@@ -20,11 +20,11 @@
 #define ANDROID_SOURCE_AUDIO_BUFFER_PROVIDER_H
 
 #include "NBAIO.h"
-#include "AudioBufferProvider.h"
+#include "ExtendedAudioBufferProvider.h"
 
 namespace android {
 
-class SourceAudioBufferProvider : public AudioBufferProvider {
+class SourceAudioBufferProvider : public ExtendedAudioBufferProvider {
 
 public:
     SourceAudioBufferProvider(const sp<NBAIO_Source>& source);
@@ -34,6 +34,9 @@
     virtual status_t getNextBuffer(Buffer *buffer, int64_t pts);
     virtual void     releaseBuffer(Buffer *buffer);
 
+    // ExtendedAudioBufferProvider interface
+    virtual size_t   framesReady() const;
+
 private:
     const sp<NBAIO_Source> mSource;     // the wrapped source
     /*const*/ size_t    mFrameBitShift; // log2(frame size in bytes)